models.py
import datetime

from django.db import models
from django.utils import timezone


# Create your models here.
class Question(models.Model):
    question_text = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')

    def __str__(self):
        return self.question_text

    def was_published_recently(self):
        now = timezone.now()
        return now - datetime.timedelta(days=1) <= self.pub_date <= now
    was_published_recently.admin_order_field = 'pub_date'
    was_published_recently.boolean = True
    was_published_recently.short_description = 'Published recently?'


class Choice(models.Model):
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    choice_text = models.CharField(max_length=200)
    votes = models.IntegerField(default=0)

    def __str__(self):
        return self.choice_text
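The three method attributes only surface once the method is listed in the admin; a sketch of the matching polls/admin.py (the module path is an assumption, the list_display wiring follows the Django tutorial):

from django.contrib import admin

from .models import Question


class QuestionAdmin(admin.ModelAdmin):
    # 'was_published_recently' renders as an icon (boolean = True), sorts by
    # 'pub_date' (admin_order_field), and uses short_description as its header.
    list_display = ('question_text', 'pub_date', 'was_published_recently')


admin.site.register(Question, QuestionAdmin)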
__init__.py
from .standard import Urllib2Transport
from .curl import PycurlTransport

import os


def get_transport(transport_type=None, os_module=os):
    transport_type = __get_transport_type(transport_type, os_module)
    if transport_type == 'urllib':
        transport = Urllib2Transport()
    else:
        transport = PycurlTransport()
    return transport


def __get_transport_type(transport_type, os_module):
    if not transport_type:
        use_curl = os_module.getenv('LWR_CURL_TRANSPORT', "0")
        # If LWR_CURL_TRANSPORT is unset or set to 0, use the default
        # urllib transport; otherwise use curl.
        if use_curl.isdigit() and not int(use_curl):
            transport_type = 'urllib'
        else:
            transport_type = 'curl'
    return transport_type


# TODO: Provide urllib implementation if these unavailable,
# also explore a requests+poster option.
from .curl import get_file
from .curl import post_file

__all__ = ['get_transport', 'get_file', 'post_file']
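A quick usage sketch (the environment value is illustrative): the transport is chosen from LWR_CURL_TRANSPORT unless a type is passed explicitly.

import os

os.environ['LWR_CURL_TRANSPORT'] = '1'   # non-zero selects the curl transport
transport = get_transport()              # -> PycurlTransport
transport = get_transport('urllib')      # explicit argument overrides the env var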
suite_test.go
/* Copyright 2019 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package grpcvtgateconn // This is agnostic of grpc and was in a separate package 'vtgateconntest'. // This has been moved here for better readability. If we introduce // protocols other than grpc in the future, this will have to be // moved back to its own package for reusability. import ( "errors" "fmt" "io" "strings" "testing" "context" "github.com/golang/protobuf/proto" "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/tb" "vitess.io/vitess/go/vt/callerid" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/vtgateconn" "vitess.io/vitess/go/vt/vtgate/vtgateservice" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) // fakeVTGateService has the server side of this fake type fakeVTGateService struct { t *testing.T panics bool hasError bool errorWait chan struct{} } const ( expectedErrMatch string = "test vtgate error" expectedCode vtrpcpb.Code = vtrpcpb.Code_INVALID_ARGUMENT ) var errTestVtGateError = vterrors.New(expectedCode, expectedErrMatch) func newContext() context.Context { ctx := context.Background() ctx = callerid.NewContext(ctx, testCallerID, nil) return ctx } func (f *fakeVTGateService) checkCallerID(ctx context.Context, name string) { ef := callerid.EffectiveCallerIDFromContext(ctx) if ef == nil { f.t.Errorf("no effective caller id for %v", name) } else { if !proto.Equal(ef, testCallerID) { f.t.Errorf("invalid effective caller id for %v: got %v expected %v", name, ef, testCallerID) } } } // queryExecute contains all the fields we use to test Execute type queryExecute struct { SQL string BindVariables map[string]*querypb.BindVariable Session *vtgatepb.Session } func (q *queryExecute) equal(q2 *queryExecute) bool { return q.SQL == q2.SQL && sqltypes.BindVariablesEqual(q.BindVariables, q2.BindVariables) && proto.Equal(q.Session, q2.Session) } // Execute is part of the VTGateService interface func (f *fakeVTGateService) Execute(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) { if f.hasError { return session, nil, errTestVtGateError } if f.panics { panic(fmt.Errorf("test forced panic")) } f.checkCallerID(ctx, "Execute") execCase, ok := execMap[sql] if !ok { return session, nil, fmt.Errorf("no match for: %s", sql) } query := &queryExecute{ SQL: sql, BindVariables: bindVariables, Session: session, } if !query.equal(execCase.execQuery) { f.t.Errorf("Execute:\n%+v, want\n%+v", query, execCase.execQuery) return session, nil, nil } if execCase.outSession != nil { *session = *execCase.outSession } return session, execCase.result, nil } // ExecuteBatch is part of the VTGateService interface func (f *fakeVTGateService) ExecuteBatch(ctx context.Context, session *vtgatepb.Session, sqlList []string, 
bindVariablesList []map[string]*querypb.BindVariable) (*vtgatepb.Session, []sqltypes.QueryResponse, error) { if f.hasError { return session, nil, errTestVtGateError } if f.panics { panic(fmt.Errorf("test forced panic")) } f.checkCallerID(ctx, "ExecuteBatch") execCase, ok := execMap[sqlList[0]] if !ok { return session, nil, fmt.Errorf("no match for: %s", sqlList[0]) } query := &queryExecute{ SQL: sqlList[0], BindVariables: bindVariablesList[0], Session: session, } if !query.equal(execCase.execQuery) { f.t.Errorf("Execute: %+v, want %+v", query, execCase.execQuery) return session, nil, nil } if execCase.outSession != nil { *session = *execCase.outSession } return session, []sqltypes.QueryResponse{{ QueryResult: execCase.result, QueryError: nil, }}, nil } // StreamExecute is part of the VTGateService interface func (f *fakeVTGateService) StreamExecute(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) error { if f.panics { panic(fmt.Errorf("test forced panic")) } execCase, ok := execMap[sql] if !ok { return fmt.Errorf("no match for: %s", sql) } f.checkCallerID(ctx, "StreamExecute") query := &queryExecute{ SQL: sql, BindVariables: bindVariables, Session: session, } if !query.equal(execCase.execQuery) { f.t.Errorf("StreamExecute:\n%+v, want\n%+v", query, execCase.execQuery) return nil } if execCase.result != nil { result := &sqltypes.Result{ Fields: execCase.result.Fields, } if err := callback(result); err != nil { return err } if f.hasError { // wait until the client has the response, since all streaming implementation may not // send previous messages if an error has been triggered. <-f.errorWait f.errorWait = make(chan struct{}) // for next test return errTestVtGateError } for _, row := range execCase.result.Rows { result := &sqltypes.Result{ Rows: [][]sqltypes.Value{row}, } if err := callback(result); err != nil { return err } } } return nil } // ResolveTransaction is part of the VTGateService interface func (f *fakeVTGateService) ResolveTransaction(ctx context.Context, dtid string) error { if f.hasError { return errTestVtGateError } if f.panics { panic(fmt.Errorf("test forced panic")) } f.checkCallerID(ctx, "ResolveTransaction") if dtid != dtid2 { return errors.New("ResolveTransaction: dtid mismatch") } return nil } func (f *fakeVTGateService) VStream(ctx context.Context, tabletType topodatapb.TabletType, vgtid *binlogdatapb.VGtid, filter *binlogdatapb.Filter, send func([]*binlogdatapb.VEvent) error) error { panic("unimplemented") } // CreateFakeServer returns the fake server for the tests func CreateFakeServer(t *testing.T) vtgateservice.VTGateService { return &fakeVTGateService{ t: t, panics: false, errorWait: make(chan struct{}), } } // RegisterTestDialProtocol registers a vtgateconn implementation under the "test" protocol func RegisterTestDialProtocol(impl vtgateconn.Impl) { vtgateconn.RegisterDialer("test", func(ctx context.Context, address string) (vtgateconn.Impl, error) { return impl, nil }) } // HandlePanic is part of the VTGateService interface func (f *fakeVTGateService) HandlePanic(err *error) { if x := recover(); x != nil { // gRPC 0.13 chokes when you return a streaming error that contains newlines. 
*err = fmt.Errorf("uncaught panic: %v, %s", x, strings.Replace(string(tb.Stack(4)), "\n", ";", -1)) } } // RunTests runs all the tests func RunTests(t *testing.T, impl vtgateconn.Impl, fakeServer vtgateservice.VTGateService) { vtgateconn.RegisterDialer("test", func(ctx context.Context, address string) (vtgateconn.Impl, error) { return impl, nil }) conn, err := vtgateconn.DialProtocol(context.Background(), "test", "") if err != nil { t.Fatalf("Got err: %v from vtgateconn.DialProtocol", err) } session := conn.Session("connection_ks@rdonly", testExecuteOptions) fs := fakeServer.(*fakeVTGateService) testExecute(t, session) testStreamExecute(t, session) testExecuteBatch(t, session) // force a panic at every call, then test that works fs.panics = true testExecutePanic(t, session) testExecuteBatchPanic(t, session) testStreamExecutePanic(t, session) fs.panics = false } // RunErrorTests runs all the tests that expect errors func RunErrorTests(t *testing.T, fakeServer vtgateservice.VTGateService) { conn, err := vtgateconn.DialProtocol(context.Background(), "test", "") if err != nil { t.Fatalf("Got err: %v from vtgateconn.DialProtocol", err) } session := conn.Session("connection_ks@rdonly", testExecuteOptions) fs := fakeServer.(*fakeVTGateService) // return an error for every call, make sure they're handled properly fs.hasError = true testExecuteError(t, session, fs) testExecuteBatchError(t, session, fs) testStreamExecuteError(t, session, fs) fs.hasError = false } func expectPanic(t *testing.T, err error) { expected1 := "test forced panic" expected2 := "uncaught panic" if err == nil || !strings.Contains(err.Error(), expected1) || !strings.Contains(err.Error(), expected2) { t.Fatalf("Expected a panic error with '%v' or '%v' but got: %v", expected1, expected2, err) } } // Verifies the returned error has the properties that we expect. 
func verifyError(t *testing.T, err error, method string) { if err == nil { t.Errorf("%s was expecting an error, didn't get one", method) return } // verify error code code := vterrors.Code(err) if code != expectedCode { t.Errorf("Unexpected error code from %s: got %v, wanted %v", method, code, expectedCode) } verifyErrorString(t, err, method) } func verifyErrorString(t *testing.T, err error, method string) { if err == nil { t.Errorf("%s was expecting an error, didn't get one", method) return } if !strings.Contains(err.Error(), expectedErrMatch) { t.Errorf("Unexpected error from %s: got %v, wanted err containing: %v", method, err, errTestVtGateError.Error()) } } func testExecute(t *testing.T, session *vtgateconn.VTGateSession) { ctx := newContext() execCase := execMap["request1"] qr, err := session.Execute(ctx, execCase.execQuery.SQL, execCase.execQuery.BindVariables) require.NoError(t, err) if !qr.Equal(execCase.result) { t.Errorf("Unexpected result from Execute: got\n%#v want\n%#v", qr, execCase.result) } _, err = session.Execute(ctx, "none", nil) want := "no match for: none" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("none request: %v, want %v", err, want) } } func testExecuteError(t *testing.T, session *vtgateconn.VTGateSession, fake *fakeVTGateService) { ctx := newContext() execCase := execMap["errorRequst"] _, err := session.Execute(ctx, execCase.execQuery.SQL, execCase.execQuery.BindVariables) verifyError(t, err, "Execute") } func testExecutePanic(t *testing.T, session *vtgateconn.VTGateSession) { ctx := newContext() execCase := execMap["request1"] _, err := session.Execute(ctx, execCase.execQuery.SQL, execCase.execQuery.BindVariables) expectPanic(t, err) } func testExecuteBatch(t *testing.T, session *vtgateconn.VTGateSession) { ctx := newContext() execCase := execMap["request1"] qr, err := session.ExecuteBatch(ctx, []string{execCase.execQuery.SQL}, []map[string]*querypb.BindVariable{execCase.execQuery.BindVariables}) require.NoError(t, err) if !qr[0].QueryResult.Equal(execCase.result) { t.Errorf("Unexpected result from Execute: got\n%#v want\n%#v", qr, execCase.result) } _, err = session.ExecuteBatch(ctx, []string{"none"}, nil) want := "no match for: none" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("none request: %v, want %v", err, want) } } func testExecuteBatchError(t *testing.T, session *vtgateconn.VTGateSession, fake *fakeVTGateService) { ctx := newContext() execCase := execMap["errorRequst"] _, err := session.ExecuteBatch(ctx, []string{execCase.execQuery.SQL}, []map[string]*querypb.BindVariable{execCase.execQuery.BindVariables}) verifyError(t, err, "ExecuteBatch") } func testExecuteBatchPanic(t *testing.T, session *vtgateconn.VTGateSession) { ctx := newContext() execCase := execMap["request1"] _, err := session.ExecuteBatch(ctx, []string{execCase.execQuery.SQL}, []map[string]*querypb.BindVariable{execCase.execQuery.BindVariables}) expectPanic(t, err) } func testStreamExecute(t *testing.T, session *vtgateconn.VTGateSession) { ctx := newContext() execCase := execMap["request1"] stream, err := session.StreamExecute(ctx, execCase.execQuery.SQL, execCase.execQuery.BindVariables) if err != nil { t.Fatal(err) } var qr sqltypes.Result for { packet, err := stream.Recv() if err != nil { if err != io.EOF { t.Error(err) } break } if len(packet.Fields) != 0 { qr.Fields = packet.Fields } if len(packet.Rows) != 0 { qr.Rows = append(qr.Rows, packet.Rows...) 
} } wantResult := *execCase.result wantResult.RowsAffected = 0 wantResult.InsertID = 0 if !qr.Equal(&wantResult) { t.Errorf("Unexpected result from StreamExecute: got %+v want %+v", qr, wantResult) } stream, err = session.StreamExecute(ctx, "none", nil) if err != nil { t.Fatal(err) } _, err = stream.Recv() want := "no match for: none"
if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("none request: %v, want %v", err, want) } } func testStreamExecuteError(t *testing.T, session *vtgateconn.VTGateSession, fake *fakeVTGateService) { ctx := newContext() execCase := execMap["request1"] stream, err := session.StreamExecute(ctx, execCase.execQuery.SQL, execCase.execQuery.BindVariables) if err != nil { t.Fatalf("StreamExecute failed: %v", err) } qr, err := stream.Recv() if err != nil { t.Fatalf("StreamExecute failed: cannot read result1: %v", err) } if !qr.Equal(&streamResultFields) { t.Errorf("Unexpected result from StreamExecute: got %#v want %#v", qr, &streamResultFields) } // signal to the server that the first result has been received close(fake.errorWait) // After 1 result, we expect to get an error (no more results). _, err = stream.Recv() if err == nil { t.Fatalf("StreamExecute channel wasn't closed") } verifyError(t, err, "StreamExecute") } func testStreamExecutePanic(t *testing.T, session *vtgateconn.VTGateSession) { ctx := newContext() execCase := execMap["request1"] stream, err := session.StreamExecute(ctx, execCase.execQuery.SQL, execCase.execQuery.BindVariables) if err != nil { t.Fatal(err) } _, err = stream.Recv() if err == nil { t.Fatalf("Received packets instead of panic?") } expectPanic(t, err) } var testCallerID = &vtrpcpb.CallerID{ Principal: "test_principal", Component: "test_component", Subcomponent: "test_subcomponent", } var testExecuteOptions = &querypb.ExecuteOptions{ IncludedFields: querypb.ExecuteOptions_TYPE_ONLY, } var execMap = map[string]struct { execQuery *queryExecute result *sqltypes.Result outSession *vtgatepb.Session err error }{ "request1": { execQuery: &queryExecute{ SQL: "request1", BindVariables: map[string]*querypb.BindVariable{ "bind1": sqltypes.Int64BindVariable(0), }, Session: &vtgatepb.Session{ TargetString: "connection_ks@rdonly", Options: testExecuteOptions, Autocommit: true, }, }, result: &result1, }, "errorRequst": { execQuery: &queryExecute{ SQL: "errorRequst", BindVariables: map[string]*querypb.BindVariable{ "bind1": sqltypes.Int64BindVariable(0), }, Session: &vtgatepb.Session{ TargetString: "connection_ks@rdonly", Options: testExecuteOptions, }, }, }, } var result1 = sqltypes.Result{ Fields: []*querypb.Field{ { Name: "field1", Type: sqltypes.Int16, }, { Name: "field2", Type: sqltypes.Int32, }, }, RowsAffected: 123, InsertID: 72, Rows: [][]sqltypes.Value{ { sqltypes.TestValue(sqltypes.Int16, "1"), sqltypes.NULL, }, { sqltypes.TestValue(sqltypes.Int16, "2"), sqltypes.NewInt32(3), }, }, } // streamResultFields is only the fields, sent as the first packet var streamResultFields = sqltypes.Result{ Fields: result1.Fields, Rows: [][]sqltypes.Value{}, } var dtid2 = "aa"
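For orientation, a minimal sketch of how a transport package could drive this suite. The function name is hypothetical, and obtaining impl (serving the fake over gRPC and dialing it) is elided:

func runConnSuite(t *testing.T, impl vtgateconn.Impl) {
    // The fake backs every RPC the suite issues; RunTests registers impl
    // under the "test" dial protocol itself before exercising it.
    fakeServer := CreateFakeServer(t)
    RunTests(t, impl, fakeServer)
    RunErrorTests(t, fakeServer)
}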
move_semantics2.rs
// move_semantics2.rs
// Make me compile without changing line 13!
// Execute `rustlings hint move_semantics2` for hints :)

fn main() {
    let vec0 = Vec::new();

    // Do not change the following line!
    println!("{} has length {} content `{:?}`", "vec0", vec0.len(), vec0);

    let mut vec1 = fill_vec(vec0);

    vec1.push(88);

    println!("{} has length {} content `{:?}`", "vec1", vec1.len(), vec1);
}

fn fill_vec(vec: Vec<i32>) -> Vec<i32> {
    let mut vec = vec;

    vec.push(22);
    vec.push(44);
    vec.push(66);

    vec
}
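The completed middle above solves the exercise by printing vec0 before it is moved into fill_vec. An alternative sketch (not the dataset's completion) keeps the original statement order by borrowing instead of moving:

fn fill_vec(vec: &Vec<i32>) -> Vec<i32> {
    let mut vec = vec.clone(); // work on a copy so the caller keeps ownership of vec0
    vec.push(22);
    vec.push(44);
    vec.push(66);
    vec
}
// called as: let mut vec1 = fill_vec(&vec0);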
main.rs
// Copyright Kani Contributors
// SPDX-License-Identifier: Apache-2.0 OR MIT
//
// Check if expect_fail uses the new property class and description in its check id

#[kani::proof]
fn main() {
    let i: i32 = kani::any();
    kani::assume(i < 10);
    kani::expect_fail(i > 20, "Blocked by assumption above.");
}
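This harness passes because the assumption makes i > 20 unsatisfiable, which is exactly what expect_fail expects. As a contrast, a hypothetical harness (my reading of expect_fail's semantics, not from the source) where the condition always holds would be reported as a violated expectation:

#[kani::proof]
fn expect_fail_violated() {
    let i: i32 = kani::any();
    kani::assume(i > 30);
    // i > 20 now holds on every path, the inner check never fails, and Kani
    // reports this EXPECTED FAIL property as violated.
    kani::expect_fail(i > 20, "Always true under the assumption above.");
}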
main.go
//==============================================================================
//
// drone-gdm/main.go: Drone plugin for Google Deployment Manager
//
// Copyright (c) 2017 The New York Times Company
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this library except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//------------------------------------------------------------------------------

package main

import (
    "fmt"
    "os"

    drone "github.com/drone/drone-plugin-go/plugin"
)

var context *GdmPluginContext
var lbl string = "[unknown]"
var rev string = "[unknown]"

// drone-gdm plugin entry point.
func main() {
    fmt.Printf("Drone GDM Plugin %s - built from %s:\n", lbl, rev)

    var err error
    context, err = NewGdmPluginContext()
    if err != nil {
        errBail(err)
    }

    // https://godoc.org/github.com/drone/drone-plugin-go/plugin
    if len(os.Args) > 1 {
        workspace := drone.Workspace{}
        drone.Param("workspace", &workspace)
        drone.Param("vargs", &context)
        drone.Parse()
        context.Dir = workspace.Path
    }

    err = context.Parse()
    if err != nil {
        errBail(err)
    }

    err = context.Validate()
    if err != nil {
        errBail(err)
    }

    err = context.Authenticate()
    if err != nil {
        errBail(err)
    }

    for _, spec := range context.Configurations {
        err = GdmExecute(context, &spec)
        if err != nil {
            errBail(err)
        }
    }

    os.Exit(0)
}

func errBail(err error) {
    fmt.Printf("\x1b[00;31mERROR: %s\n\x1b[0m", err)
    doCleanup()
    os.Exit(1)
}

func doCleanup() {
    err := context.Cleanup()
    if err != nil {
        // No need to panic on error; (likely ephemeral mount disappeared)
        fmt.Printf("drone-gdm: WARNING: cleanup failed with: %s\n", err)
    }
}

// EOF
ls_replica.go
package cmd

import (
    "fmt"
    "os"
    "text/tabwriter"

    "github.com/sirupsen/logrus"
    "github.com/urfave/cli"

    "github.com/longhorn/longhorn-engine/pkg/controller/client"
    replicaClient "github.com/longhorn/longhorn-engine/pkg/replica/client"
    "github.com/longhorn/longhorn-engine/pkg/types"
)

func LsReplicaCmd() cli.Command {
    return cli.Command{
        Name:      "ls-replica",
        ShortName: "ls",
        Action: func(c *cli.Context) {
            if err := lsReplica(c); err != nil {
                logrus.Fatalf("Error running ls command: %v", err)
            }
        },
    }
}

func getCli(c *cli.Context) *client.ControllerClient {
    url := c.GlobalString("url")
    return client.NewControllerClient(url)
}

func lsReplica(c *cli.Context) error {
    controllerClient := getCli(c)

    reps, err := controllerClient.ReplicaList()
    if err != nil {
        return err
    }

    format := "%s\t%s\t%v\n"
    tw := tabwriter.NewWriter(os.Stdout, 0, 20, 1, ' ', 0)
    fmt.Fprintf(tw, format, "ADDRESS", "MODE", "CHAIN")
    for _, r := range reps {
        if r.Mode == types.ERR {
            fmt.Fprintf(tw, format, r.Address, r.Mode, "")
            continue
        }
        chain := interface{}("")
        chainList, err := getChain(r.Address)
        if err == nil {
            chain = chainList
        }
        fmt.Fprintf(tw, format, r.Address, r.Mode, chain)
    }
    tw.Flush()

    return nil
}

func getChain(address string) ([]string, error) {
    repClient, err := replicaClient.NewReplicaClient(address)
    if err != nil {
        return nil, err
    }

    r, err := repClient.GetReplica()
    if err != nil {
        return nil, err
    }

    return r.Chain, err
}
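A standalone sketch of the text/tabwriter pattern lsReplica relies on (the replica values are made up): tab-separated cells are aligned into columns when Flush is called.

package main

import (
    "fmt"
    "os"
    "text/tabwriter"
)

func main() {
    format := "%s\t%s\t%v\n"
    tw := tabwriter.NewWriter(os.Stdout, 0, 20, 1, ' ', 0)
    fmt.Fprintf(tw, format, "ADDRESS", "MODE", "CHAIN")
    fmt.Fprintf(tw, format, "tcp://replica-1:9502", "RW", []string{"volume-head-000.img"})
    tw.Flush()
}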
crypto.rs
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License.. //! //! Cryptographic Functions //! use sgx_types::*; use sgx_types::marker::ContiguousMemory; use std::ops::{Drop, DerefMut}; use std::ptr; use std::mem; use std::cell::{Cell, RefCell}; /// /// The rsgx_sha256_msg function performs a standard SHA256 hash over the input data buffer. /// /// # Description /// /// The rsgx_sha256_msg function performs a standard SHA256 hash over the input data buffer. /// Only a 256-bit version of the SHA hash is supported. (Other sizes, for example 512, are /// not supported in this minimal cryptography library). /// /// The function should be used if the complete input data stream is available. /// Otherwise, the Init, Update… Update, Final procedure should be used to compute /// a SHA256 bit hash over multiple input data sets. /// /// # Parameters /// /// **src** /// /// A pointer to the input data stream to be hashed. /// /// # Requirements /// /// Library: libsgx_tcrypto.a /// /// # Return value /// /// The 256-bit hash that has been SHA256 calculated /// /// # Errors /// /// **SGX_ERROR_INVALID_PARAMETER** /// /// Input pointers are invalid. /// /// **SGX_ERROR_OUT_OF_MEMORY** /// /// Not enough memory is available to complete this operation. /// /// **SGX_ERROR_UNEXPECTED** /// /// The SHA256 hash calculation failed. /// pub fn rsgx_sha256_msg<T>(src: &T) -> SgxResult<sgx_sha256_hash_t> where T: Copy + ContiguousMemory, { let size = mem::size_of::<T>(); if size == 0 { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } if size > u32::max_value() as usize { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } let mut hash = sgx_sha256_hash_t::default(); let ret = unsafe { sgx_sha256_msg(src as *const _ as *const u8, size as u32, &mut hash as *mut sgx_sha256_hash_t) }; match ret { sgx_status_t::SGX_SUCCESS => Ok(hash), _ => Err(ret), } } /// /// The rsgx_sha256_slice function performs a standard SHA256 hash over the input data buffer. 
/// pub fn rsgx_sha256_slice<T>(src: &[T]) -> SgxResult<sgx_sha256_hash_t> where T: Copy + ContiguousMemory, { let size = mem::size_of_val(src); if size == 0 { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } if size > u32::max_value() as usize { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } let mut hash = sgx_sha256_hash_t::default(); let ret = unsafe { sgx_sha256_msg(src.as_ptr() as *const u8, size as u32, &mut hash as *mut sgx_sha256_hash_t) }; match ret { sgx_status_t::SGX_SUCCESS => Ok(hash), _ => Err(ret), } } fn rsgx_sha256_init(sha_handle: &mut sgx_sha_state_handle_t) -> sgx_status_t { unsafe { sgx_sha256_init(sha_handle as *mut sgx_sha_state_handle_t) } } fn rsgx_sha256_update_msg<T>(src: &T, sha_handle: sgx_sha_state_handle_t) -> sgx_status_t where T: Copy + ContiguousMemory, { let size = mem::size_of::<T>(); if size == 0 { return sgx_status_t::SGX_ERROR_INVALID_PARAMETER; } if size > u32::max_value() as usize { return sgx_status_t::SGX_ERROR_INVALID_PARAMETER; } unsafe { sgx_sha256_update(src as *const _ as *const u8, size as u32, sha_handle) } } fn rsgx_sha256_update_slice<T>(src: &[T], sha_handle: sgx_sha_state_handle_t) -> sgx_status_t where T: Copy + ContiguousMemory, { let size = mem::size_of_val(src); if size == 0 { return sgx_status_t::SGX_ERROR_INVALID_PARAMETER; } if size > u32::max_value() as usize { return sgx_status_t::SGX_ERROR_INVALID_PARAMETER; } unsafe { sgx_sha256_update(src.as_ptr() as *const u8, size as u32, sha_handle) } } fn rsgx_sha256_get_hash(sha_handle: sgx_sha_state_handle_t, hash: &mut sgx_sha256_hash_t) -> sgx_status_t { unsafe { sgx_sha256_get_hash(sha_handle, hash as *mut sgx_sha256_hash_t) } } fn rsgx_sha256_close(sha_handle: sgx_sha_state_handle_t) -> sgx_status_t { unsafe { sgx_sha256_close(sha_handle) } } pub fn rsgx_sha1_msg<T>(src: &T) -> SgxResult<sgx_sha1_hash_t> where T: Copy + ContiguousMemory, { let size = mem::size_of::<T>(); if size == 0 { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } if size > u32::max_value() as usize { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } let mut hash = sgx_sha1_hash_t::default(); let ret = unsafe { sgx_sha1_msg(src as *const _ as *const u8, size as u32, &mut hash as *mut sgx_sha1_hash_t) }; match ret { sgx_status_t::SGX_SUCCESS => Ok(hash), _ => Err(ret), } } pub fn rsgx_sha1_slice<T>(src: &[T]) -> SgxResult<sgx_sha1_hash_t> where T: Copy + ContiguousMemory, { let size = mem::size_of_val(src); if size == 0 { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } if size > u32::max_value() as usize { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } let mut hash = sgx_sha1_hash_t::default(); let ret = unsafe { sgx_sha1_msg(src.as_ptr() as *const u8, size as u32, &mut hash as *mut sgx_sha1_hash_t) }; match ret { sgx_status_t::SGX_SUCCESS => Ok(hash), _ => Err(ret), } } fn rsgx_sha1_init(sha_handle: &mut sgx_sha_state_handle_t) -> sgx_status_t { unsafe { sgx_sha1_init(sha_handle as *mut sgx_sha_state_handle_t) } } fn rsgx_sha1_update_msg<T>(src: &T, sha_handle: sgx_sha_state_handle_t) -> sgx_status_t where T: Copy + ContiguousMemory, { let size = mem::size_of::<T>(); if size == 0 { return sgx_status_t::SGX_ERROR_INVALID_PARAMETER; } if size > u32::max_value() as usize { return sgx_status_t::SGX_ERROR_INVALID_PARAMETER; } unsafe { sgx_sha1_update(src as *const _ as *const u8, size as u32, sha_handle) } } fn rsgx_sha1_update_slice<T>(src: &[T], sha_handle: sgx_sha_state_handle_t) -> sgx_status_t where T: Copy + ContiguousMemory, { let size = 
mem::size_of_val(src); if size == 0 { return sgx_status_t::SGX_ERROR_INVALID_PARAMETER; } if size > u32::max_value() as usize { return sgx_status_t::SGX_ERROR_INVALID_PARAMETER; } unsafe { sgx_sha1_update(src.as_ptr() as *const u8, size as u32, sha_handle) } } fn rsgx_sha1_get_hash(sha_handle: sgx_sha_state_handle_t, hash: &mut sgx_sha1_hash_t) -> sgx_status_t { unsafe { sgx_sha1_get_hash(sha_handle, hash as *mut sgx_sha1_hash_t) } } fn rsgx_sha1_close(sha_handle: sgx_sha_state_handle_t) -> sgx_status_t { unsafe { sgx_sha1_close(sha_handle) } } /// /// SHA algorithm context state. /// /// This is a handle to the context state used by the cryptography library to perform an iterative SHA256 hash. /// The algorithm stores the intermediate results of performing the hash calculation over data sets. /// pub struct SgxShaHandle { handle: RefCell<sgx_sha_state_handle_t>, initflag: Cell<bool>, } impl SgxShaHandle { /// /// Constructs a new, empty SgxShaHandle. /// pub fn new() -> SgxShaHandle { SgxShaHandle{ handle: RefCell::new(ptr::null_mut() as sgx_sha_state_handle_t), initflag: Cell::new(false), } } /// /// init returns an allocated and initialized SHA algorithm context state. /// /// This should be part of the Init, Update … Update, Final process when the SHA hash is to be performed /// over multiple datasets. If a complete dataset is available, the recommend call is rsgx_sha256_msg to /// perform the hash in a single call. /// /// # Description /// /// Calling init is the first set in performing a SHA256 hash over multiple datasets. The caller does not /// allocate memory for the SHA256 state that this function returns. The state is specific to the implementation /// of the cryptography library; thus the allocation is performed by the library itself. If the hash over the /// desired datasets is completed or any error occurs during the hash calculation process, sgx_sha256_close should /// be called to free the state allocated by this algorithm. /// /// # Requirements /// /// Library: libsgx_tcrypto.a /// /// # Errors /// /// **SGX_ERROR_INVALID_PARAMETER** /// /// The pointer is invalid. /// /// **SGX_ERROR_OUT_OF_MEMORY** /// /// Not enough memory is available to complete this operation. /// /// **SGX_ERROR_UNEXPECTED** /// /// The SHA256 state is not initialized properly due to an internal cryptography library failure. /// pub fn init(&self) -> SgxError { if self.initflag.get() { return Ok(()); } let ret = rsgx_sha256_init(self.handle.borrow_mut().deref_mut()); match ret { sgx_status_t::SGX_SUCCESS => { self.initflag.set(true); Ok(()) } _ => Err(ret), } } /// /// update_msg performs a SHA256 hash over the input dataset provided. /// /// This function supports an iterative calculation of the hash over multiple datasets where the /// sha_handle contains the intermediate results of the hash calculation over previous datasets. /// /// # Description /// /// This function should be used as part of a SHA256 calculation over multiple datasets. /// If a SHA256 hash is needed over a single data set, function rsgx_sha256_msg should be used instead. /// Prior to calling this function on the first dataset, the init function must be called first to allocate /// and initialize the SHA256 state structure which will hold intermediate hash results over earlier datasets. /// The function get_hash should be used to obtain the hash after the final dataset has been processed /// by this function. /// /// # Parameters /// /// **src** /// /// A pointer to the input data stream to be hashed. 
/// /// # Requirements /// /// Library: libsgx_tcrypto.a /// /// # Errors /// /// **SGX_ERROR_INVALID_PARAMETER** /// /// The pointer is invalid. /// /// **SGX_ERROR_INVALID_STATE** /// /// The SHA256 state is not initialized. /// /// **SGX_ERROR_UNEXPECTED** /// /// An internal cryptography library failure occurred while performing the SHA256 hash calculation. /// pub fn update_msg<T>(&self, src: &T) -> SgxError where T: Copy + ContiguousMemory, { if !self.initflag.get() { return Err(sgx_status_t::SGX_ERROR_INVALID_STATE); } let ret = rsgx_sha256_update_msg(src, *self.handle.borrow()); match ret { sgx_status_t::SGX_SUCCESS => Ok(()), _ => Err(ret), } } /// /// update_slice performs a SHA256 hash over the input dataset provided. /// pub fn update_slice<T>(&self, src: &[T]) -> SgxError where T: Copy + ContiguousMemory, { if !self.initflag.get() { return Err(sgx_status_t::SGX_ERROR_INVALID_STATE); } let ret = rsgx_sha256_update_slice(src, *self.handle.borrow()); match ret { sgx_status_t::SGX_SUCCESS => Ok(()), _ => Err(ret), } } /// /// get_hash obtains the SHA256 hash after the final dataset has been processed. /// /// # Description /// /// This function returns the hash after performing the SHA256 calculation over one or more datasets /// using the update function. /// /// # Requirements /// /// Library: libsgx_tcrypto.a /// /// # Return value /// /// The 256-bit hash that has been SHA256 calculated /// /// # Errors /// /// **SGX_ERROR_INVALID_PARAMETER** /// /// The pointer is invalid. /// /// **SGX_ERROR_INVALID_STATE** /// /// The SHA256 state is not initialized. /// /// **SGX_ERROR_UNEXPECTED** /// /// The SHA256 state passed in is likely problematic causing an internal cryptography library failure. /// pub fn get_hash(&self) -> SgxResult<sgx_sha256_hash_t> { if !self.initflag.get() { return Err(sgx_status_t::SGX_ERROR_INVALID_STATE); } let mut hash = sgx_sha256_hash_t::default(); let ret = rsgx_sha256_get_hash(*self.handle.borrow(), &mut hash); match ret { sgx_status_t::SGX_SUCCESS => Ok(hash), _ => Err(ret), } } /// /// close cleans up and deallocates the SHA256 state that was allocated in function init. /// /// # Description /// /// Calling close is the last step after performing a SHA256 hash over multiple datasets. /// The caller uses this function to deallocate memory used to store the SHA256 calculation state. /// /// # Requirements /// /// Library: libsgx_tcrypto.a /// /// # Errors /// /// **SGX_ERROR_INVALID_PARAMETER** /// /// The input handle is invalid. /// pub fn close(&self) -> SgxError { if !self.initflag.get() { return Ok(()); } let ret = { let handle = *self.handle.borrow(); if handle.is_null() { sgx_status_t::SGX_SUCCESS } else { rsgx_sha256_close(handle) } }; match ret { sgx_status_t::SGX_SUCCESS => { self.initflag.set(false); *self.handle.borrow_mut() = ptr::null_mut(); Ok(()) } _ => Err(ret), } } } impl Default for SgxShaHandle { fn default() -> Self { Self::new() } } impl Drop for SgxShaHandle { /// /// drop cleans up and deallocates the SHA256 state that was allocated in function init. 
/// fn drop(&mut self) { let _ = self.close(); } } pub struct SgxSha1Handle { handle: RefCell<sgx_sha_state_handle_t>, initflag: Cell<bool>, } impl SgxSha1Handle { pub fn new() -> SgxSha1Handle { SgxSha1Handle{ handle: RefCell::new(ptr::null_mut() as sgx_sha_state_handle_t), initflag: Cell::new(false), } } pub fn init(&self) -> SgxError { if self.initflag.get() { return Ok(()); } let ret = rsgx_sha1_init(self.handle.borrow_mut().deref_mut()); match ret { sgx_status_t::SGX_SUCCESS => { self.initflag.set(true); Ok(()) } _ => Err(ret), } } pub fn update_msg<T>(&self, src: &T) -> SgxError where T: Copy + ContiguousMemory, { if !self.initflag.get() { return Err(sgx_status_t::SGX_ERROR_INVALID_STATE); } let ret = rsgx_sha1_update_msg(src, *self.handle.borrow()); match ret { sgx_status_t::SGX_SUCCESS => Ok(()), _ => Err(ret), } } pub fn update_slice<T>(&self, src: &[T]) -> SgxError where T: Copy + ContiguousMemory, { if !self.initflag.get() { return Err(sgx_status_t::SGX_ERROR_INVALID_STATE); } let ret = rsgx_sha1_update_slice(src, *self.handle.borrow()); match ret { sgx_status_t::SGX_SUCCESS => Ok(()), _ => Err(ret), } } pub fn get_hash(&self) -> SgxResult<sgx_sha1_hash_t> { if !self.initflag.get() { return Err(sgx_status_t::SGX_ERROR_INVALID_STATE); } let mut hash = sgx_sha1_hash_t::default(); let ret = rsgx_sha1_get_hash(*self.handle.borrow(), &mut hash); match ret { sgx_status_t::SGX_SUCCESS => Ok(hash), _ => Err(ret), } } pub fn close(&self) -> SgxError { if !self.initflag.get() { return Ok(()); } let ret = { let handle = *self.handle.borrow(); if handle.is_null() { sgx_status_t::SGX_SUCCESS } else { rsgx_sha1_close(handle) } }; match ret { sgx_status_t::SGX_SUCCESS => { self.initflag.set(false); *self.handle.borrow_mut() = ptr::null_mut(); Ok(()) } _ => Err(ret), } } } impl Default for SgxSha1Handle { fn default() -> Self { Self::new() } } impl Drop for SgxSha1Handle { /// /// drop cleans up and deallocates the SHA256 state that was allocated in function init. /// fn drop(&mut self) { let _ = self.close(); } } /// /// rsgx_rijndael128GCM_encrypt performs a Rijndael AES-GCM encryption operation. /// /// Only a 128bit key size is supported by this Intel(R) SGX SDK cryptography library. /// /// # Description /// /// The Galois/Counter Mode (GCM) is a mode of operation of the AES algorithm. /// GCM [NIST SP 800-38D] uses a variation of the counter mode of operation for /// encryption. GCM assures authenticity of the confidential data (of up to about /// 64 GB per invocation) using a universal hash function defined over a binary /// finite field (the Galois field). /// /// GCM can also provide authentication assurance for additional data (of practically /// unlimited length per invocation) that is not encrypted. GCM provides /// stronger authentication assurance than a (non-cryptographic) checksum or /// error detecting code. In particular, GCM can detect both accidental modifications /// of the data and intentional, unauthorized modifications. /// /// It is recommended that the source and destination data buffers are allocated /// within the enclave. The AAD buffer could be allocated within or outside /// enclave memory. The use of AAD data buffer could be information identifying /// the encrypted data since it will remain in clear text. /// /// # Parameters /// /// **key** /// /// A pointer to key to be used in the AES-GCM encryption operation. The size must be 128 bits. /// /// **src** /// /// A pointer to the input data stream to be encrypted. 
Buffer content could be empty if there is AAD text. /// /// **iv** /// /// A pointer to the initialization vector to be used in the AES-GCM calculation. NIST AES-GCM recommended /// IV size is 96 bits (12 bytes). /// /// **aad** /// /// A pointer to an optional additional authentication data buffer which is used in the GCM MAC calculation. /// The data in this buffer will not be encrypted. The field is optional and content could be empty. /// /// **dst** /// /// A pointer to the output encrypted data buffer. This buffer should be allocated by the calling code. /// /// **mac** /// /// This is the output GCM MAC performed over the input data buffer (data to be encrypted) as well as /// the additional authentication data (this is optional data). The calling code should allocate this buffer. /// /// # Requirements /// /// Library: libsgx_tcrypto.a /// /// # Errors /// /// **SGX_ERROR_INVALID_PARAMETER** /// /// If both source buffer and AAD buffer content are empty. /// /// If IV Length is not equal to 12 (bytes). /// /// **SGX_ERROR_OUT_OF_MEMORY** /// /// Not enough memory is available to complete this operation. /// /// **SGX_ERROR_UNEXPECTED** /// /// An internal cryptography library failure occurred. /// pub fn rsgx_rijndael128GCM_encrypt( key: &sgx_aes_gcm_128bit_key_t, src: &[u8], iv: &[u8], aad: &[u8], dst: &mut [u8], mac: &mut sgx_aes_gcm_128bit_tag_t, ) -> SgxError { let src_len = src.len(); if src_len > u32::max_value() as usize { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } let iv_len = iv.len(); if iv_len != SGX_AESGCM_IV_SIZE { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } let aad_len = aad.len(); if aad_len > u32::max_value() as usize { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } let dst_len = dst.len(); if dst_len > u32::max_value() as usize { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } if dst_len < src_len { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } let ret = unsafe { let p_aad = if aad_len != 0 { aad.as_ptr() } else { ptr::null() }; let (p_src, p_dst) = if src_len != 0 { (src.as_ptr(), dst.as_mut_ptr()) } else { (ptr::null(), ptr::null_mut()) }; sgx_rijndael128GCM_encrypt( key as *const sgx_aes_gcm_128bit_key_t, p_src, src_len as u32, p_dst, iv.as_ptr(), iv_len as u32, p_aad, aad_len as u32, mac as *mut sgx_aes_gcm_128bit_tag_t, ) }; match ret { sgx_status_t::SGX_SUCCESS => Ok(()), _ => Err(ret), } } /// /// rsgx_rijndael128GCM_decrypt performs a Rijndael AES-GCM decryption operation. /// /// Only a 128bit key size is supported by this Intel(R) SGX SDK cryptography library. /// /// # Description /// /// The Galois/Counter Mode (GCM) is a mode of operation of the AES algorithm. /// GCM [NIST SP 800-38D] uses a variation of the counter mode of operation for /// encryption. GCM assures authenticity of the confidential data (of up to about /// 64 GB per invocation) using a universal hash function defined over a binary /// finite field (the Galois field). /// /// GCM can also provide authentication assurance for additional data (of practically /// unlimited length per invocation) that is not encrypted. GCM provides /// stronger authentication assurance than a (non-cryptographic) checksum or /// error detecting code. In particular, GCM can detect both accidental modifications /// of the data and intentional, unauthorized modifications. /// /// It is recommended that the destination data buffer is allocated within the /// enclave. The AAD buffer could be allocated within or outside enclave memory. 
/// /// # Parameters /// /// **key** /// /// A pointer to key to be used in the AES-GCM decryption operation. The size must be 128 bits. /// /// **src** /// /// A pointer to the input data stream to be decrypted. Buffer content could be empty if there is AAD text. /// /// **iv** /// /// A pointer to the initialization vector to be used in the AES-GCM calculation. NIST AES-GCM recommended /// IV size is 96 bits (12 bytes). /// /// **aad** /// /// A pointer to an optional additional authentication data buffer which is provided for the GCM MAC calculation /// when encrypting. The data in this buffer was not encrypted. The field is optional and content could be empty. /// /// **mac** /// /// This is the GCM MAC that was performed over the input data buffer (data to be encrypted) as well as /// the additional authentication data (this is optional data) during the encryption process (call to /// rsgx_rijndael128GCM_encrypt). /// /// **dst** /// /// A pointer to the output decrypted data buffer. This buffer should be allocated by the calling code. /// /// # Requirements /// /// Library: libsgx_tcrypto.a /// /// # Errors /// /// **SGX_ERROR_INVALID_PARAMETER** /// /// If both source buffer and AAD buffer content are empty. /// /// If IV Length is not equal to 12 (bytes). /// /// **SGX_ERROR_MAC_MISMATCH** /// /// The input MAC does not match the MAC calculated. /// /// **SGX_ERROR_OUT_OF_MEMORY** /// /// Not enough memory is available to complete this operation. /// /// **SGX_ERROR_UNEXPECTED** /// /// An internal cryptography library failure occurred. /// pub fn rsgx_rijndael128GCM_decrypt( key: &sgx_aes_gcm_128bit_key_t, src: &[u8], iv: &[u8], aad: &[u8], mac: &sgx_aes_gcm_128bit_tag_t, dst: &mut [u8], ) -> SgxError { let src_len = src.len(); if src_len > u32::max_value() as usize { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } let iv_len = iv.len(); if iv_len != SGX_AESGCM_IV_SIZE { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } let aad_len = aad.len(); if aad_len > u32::max_value() as usize { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } let dst_len = dst.len(); if dst_len > u32::max_value() as usize { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } if dst_len < src_len { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } let ret = unsafe { let p_aad = if !aad.is_empty() { aad.as_ptr() } else { ptr::null() }; let (p_src, p_dst) = if src_len != 0 { (src.as_ptr(), dst.as_mut_ptr()) } else { (ptr::null(), ptr::null_mut()) }; sgx_rijndael128GCM_decrypt( key as *const sgx_aes_gcm_128bit_key_t, p_src, src_len as u32, p_dst, iv.as_ptr(), iv_len as u32, p_aad, aad_len as u32, mac as *const sgx_aes_gcm_128bit_tag_t, ) }; match ret { sgx_status_t::SGX_SUCCESS => Ok(()), _ => Err(ret), } } /// /// The rsgx_rijndael128_cmac_msg function performs a standard 128bit CMAC hash over the input data buffer. /// /// # Description /// /// The rsgx_rijndael128_cmac_msg function performs a standard CMAC hash over the input data buffer. /// Only a 128-bit version of the CMAC hash is supported. /// /// The function should be used if the complete input data stream is available. /// Otherwise, the Init, Update… Update, Final procedure should be used to compute /// a CMAC hash over multiple input data sets. /// /// # Parameters /// /// **key** /// /// A pointer to key to be used in the CMAC hash operation. The size must be 128 bits. /// /// **src** /// /// A pointer to the input data stream to be hashed. 
/// /// # Requirements /// /// Library: libsgx_tcrypto.a /// /// # Return value /// /// The 128-bit hash that has been CMAC calculated /// /// # Errors /// /// **SGX_ERROR_INVALID_PARAMETER** /// /// The pointer is invalid. /// /// **SGX_ERROR_OUT_OF_MEMORY** /// /// Not enough memory is available to complete this operation. /// /// **SGX_ERROR_UNEXPECTED** /// /// An internal cryptography library failure occurred. /// pub fn rsgx_rijndael128_cmac_msg<T>(key: &sgx_cmac_128bit_key_t, src: &T) -> SgxResult<sgx_cmac_128bit_tag_t> where T: Copy + ContiguousMemory, { let size = mem::size_of::<T>(); if size == 0 { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } if size > u32::max_value() as usize { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } let mut mac = sgx_cmac_128bit_tag_t::default(); let ret = unsafe { sgx_rijndael128_cmac_msg( key as *const sgx_cmac_128bit_key_t, src as *const _ as *const u8, size as u32, &mut mac as *mut sgx_cmac_128bit_tag_t, ) }; match ret { sgx_status_t::SGX_SUCCESS => Ok(mac), _ => Err(ret), } } /// /// The rsgx_rijndael128_cmac_slice function performs a standard 128bit CMAC hash over the input data buffer. /// pub fn rsgx_rijndael128_cmac_slice<T>(key: &sgx_cmac_128bit_key_t, src: &[T]) -> SgxResult<sgx_cmac_128bit_tag_t> where T: Copy + ContiguousMemory, { let size = mem::size_of_val(src); if size == 0 { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } if size > u32::max_value() as usize { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } let mut mac = sgx_cmac_128bit_tag_t::default(); let ret = unsafe { sgx_rijndael128_cmac_msg( key as *const sgx_cmac_128bit_key_t, src.as_ptr() as *const u8, size as u32, &mut mac as *mut sgx_cmac_128bit_tag_t, ) }; match ret { sgx_status_t::SGX_SUCCESS => Ok(mac), _ => Err(ret), } } fn rsgx_cmac128_init(key: &sgx_cmac_128bit_key_t, cmac_handle: &mut sgx_cmac_state_handle_t) -> sgx_status_t { unsafe { sgx_cmac128_init( key as *const sgx_cmac_128bit_key_t, cmac_handle as *mut sgx_cmac_state_handle_t, ) } } fn rsgx_cmac128_update_msg<T>(src: &T, cmac_handle: sgx_cmac_state_handle_t) -> sgx_status_t where T: Copy + ContiguousMemory, { let size = mem::size_of::<T>(); if size == 0 { return sgx_status_t::SGX_ERROR_INVALID_PARAMETER; } if size > u32::max_value() as usize { return sgx_status_t::SGX_ERROR_INVALID_PARAMETER; } unsafe { sgx_cmac128_update(src as *const _ as *const u8, size as u32, cmac_handle) } } fn rsgx_cmac128_update_slice<T>(src: &[T], cmac_handle: sgx_cmac_state_handle_t) -> sgx_status_t where T: Copy + ContiguousMemory, { let size = mem::size_of_val(src); if size == 0 { return sgx_status_t::SGX_ERROR_INVALID_PARAMETER; } if size > u32::max_value() as usize { return sgx_status_t::SGX_ERROR_INVALID_PARAMETER; } unsafe { sgx_cmac128_update(src.as_ptr() as *const _ as *const u8, size as u32, cmac_handle) } } fn rsgx_cmac128_final(cmac_handle: sgx_cmac_state_handle_t, hash: &mut sgx_cmac_128bit_tag_t) -> sgx_status_t { unsafe { sgx_cmac128_final(cmac_handle, hash as *mut sgx_cmac_128bit_tag_t) } } fn rsgx_cmac128_close(cmac_handle: sgx_cmac_state_handle_t) -> sgx_status_t { unsafe { sgx_cmac128_close(cmac_handle) } } /// /// CMAC algorithm context state. /// /// This is a handle to the context state used by the cryptography library to perform an /// iterative CMAC 128-bit hash. The algorithm stores the intermediate results of performing /// the hash calculation over data sets. 
/// pub struct SgxCmacHandle { handle: RefCell<sgx_cmac_state_handle_t>, initflag: Cell<bool>, } impl SgxCmacHandle { /// /// Constructs a new, empty SgxCmacHandle. /// pub fn new() -> SgxCmacHandle { SgxCmacHandle { handle: RefCell::new(ptr::null_mut() as sgx_cmac_state_handle_t), initflag: Cell::new(false), } } /// /// init returns an allocated and initialized CMAC algorithm context state. /// /// This should be part of the Init, Update … Update, Final process when the CMAC hash is to be /// performed over multiple datasets. If a complete dataset is available, the recommended call /// is rsgx_rijndael128_cmac_msg to perform the hash in a single call. /// /// # Description /// /// Calling init is the first set in performing a CMAC 128-bit hash over multiple datasets. /// The caller does not allocate memory for the CMAC state that this function returns. /// The state is specific to the implementation of the cryptography library and thus the /// allocation is performed by the library itself. If the hash over the desired datasets is /// completed or any error occurs during the hash calculation process, sgx_cmac128_close should /// be called to free the state allocated by this algorithm. /// /// # Parameters /// /// **key** /// /// A pointer to key to be used in the CMAC hash operation. The size must be 128 bits. /// /// # Requirements /// /// Library: libsgx_tcrypto.a /// /// # Errors /// /// **SGX_ERROR_INVALID_PARAMETER** /// /// The pointer is invalid. /// /// **SGX_ERROR_OUT_OF_MEMORY** /// /// Not enough memory is available to complete this operation. /// /// **SGX_ERROR_UNEXPECTED** /// /// An internal cryptography library failure occurred. /// pub fn init(&self, key: &sgx_cmac_128bit_key_t) -> SgxError { if self.initflag.get() { return Ok(()); } let ret = rsgx_cmac128_init(key, self.handle.borrow_mut().deref_mut()); match ret { sgx_status_t::SGX_SUCCESS => { self.initflag.set(true); Ok(()) } _ => Err(ret), } } /// /// update_msg performs a CMAC 128-bit hash over the input dataset provided. /// /// This function supports an iterative calculation of the hash over multiple datasets where the /// cmac_handle contains the intermediate results of the hash calculation over previous datasets. /// /// # Description /// /// This function should be used as part of a CMAC 128-bit hash calculation over /// multiple datasets. If a CMAC hash is needed over a single data set, function /// rsgx_rijndael128_cmac128_msg should be used instead. Prior to calling /// this function on the first dataset, the init function must be called first to /// allocate and initialize the CMAC state structure which will hold intermediate /// hash results over earlier datasets. The function get_hash should be used /// to obtain the hash after the final dataset has been processed by this function. /// /// # Parameters /// /// **src** /// /// A pointer to the input data stream to be hashed. /// /// # Requirements /// /// Library: libsgx_tcrypto.a /// /// # Errors /// /// **SGX_ERROR_INVALID_PARAMETER** /// /// The pointer is invalid. /// /// **SGX_ERROR_INVALID_STATE** /// /// The CMAC state is not initialized. /// /// **SGX_ERROR_OUT_OF_MEMORY** /// /// Not enough memory is available to complete this operation. /// /// **SGX_ERROR_UNEXPECTED** /// /// An internal cryptography library failure occurred while performing the CMAC hash calculation. 
/// pub fn update_msg<T>(&self, src: &T) -> SgxError where T: Copy + ContiguousMemory, { if !self.initflag.get() { return Err(sgx_status_t::SGX_ERROR_INVALID_STATE); } let ret = rsgx_cmac128_update_msg(src, *self.handle.borrow()); match ret { sgx_status_t::SGX_SUCCESS => Ok(()), _ => Err(ret), } } /// /// update_slice performs a CMAC 128-bit hash over the input dataset provided. /// pub fn update_slice<T>(&self, src: &[T]) -> SgxError where T: Copy + ContiguousMemory, { if !self.initflag.get() { return Err(sgx_status_t::SGX_ERROR_INVALID_STATE); } let ret = rsgx_cmac128_update_slice(src, *self.handle.borrow()); match ret { sgx_status_t::SGX_SUCCESS => Ok(()), _ => Err(ret), } } /// /// get_hash obtains the CMAC 128-bit hash after the final dataset has been processed. /// /// # Description /// /// This function returns the hash after performing the CMAC 128-bit hash calculation /// over one or more datasets using the update function. /// /// # Requirements /// /// Library: libsgx_tcrypto.a /// /// # Return value /// /// The 128-bit hash that has been CMAC calculated /// /// # Errors /// /// **SGX_ERROR_INVALID_PARAMETER** /// /// The pointer is invalid. /// /// **SGX_ERROR_INVALID_STATE** /// /// The CMAC state is not initialized. /// /// **SGX_ERROR_UNEXPECTED** /// /// The CMAC state passed in is likely problematic causing an internal cryptography library failure. /// pub fn get_hash(&self) -> SgxResult<sgx_cmac_128bit_tag_t> { if !self.initflag.get() { return Err(sgx_status_t::SGX_ERROR_INVALID_STATE); } let mut hash = sgx_cmac_128bit_tag_t::default(); let ret = rsgx_cmac128_final(*self.handle.borrow(), &mut hash); match ret { sgx_status_t::SGX_SUCCESS => Ok(hash), _ => Err(ret), } } /// /// close cleans up and deallocates the CMAC algorithm context state that was allocated in function init. /// /// # Description /// /// Calling close is the last step after performing a CMAC hash over multiple datasets. /// The caller uses this function to deallocate memory used for storing the CMAC algorithm context state. /// /// # Requirements /// /// Library: libsgx_tcrypto.a /// /// # Errors /// /// **SGX_ERROR_INVALID_PARAMETER** /// /// The input handle is invalid. /// pub fn close(&self) -> SgxError { if !self.initflag.get() { return Ok(()); } let ret = { let handle = *self.handle.borrow(); if handle.is_null() { sgx_status_t::SGX_SUCCESS } else { rsgx_cmac128_close(handle) } }; match ret { sgx_status_t::SGX_SUCCESS => { self.initflag.set(false); *self.handle.borrow_mut() = ptr::null_mut(); Ok(()) } _ => Err(ret), } } } impl Default for SgxCmacHandle { fn default() -> Self { Self::new() } } impl Drop for SgxCmacHandle { /// /// drop cleans up and deallocates the CMAC algorithm context state that was allocated in function init. 
/// fn drop(&mut self) { let _ = self.close(); } } pub fn rsgx_hmac_sha256_msg<T>(key: &sgx_hmac_256bit_key_t, src: &T) -> SgxResult<sgx_hmac_256bit_tag_t> where T: Copy + ContiguousMemory, { let size = mem::size_of::<T>(); if size == 0 { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } if size > u32::max_value() as usize { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } let mut mac = sgx_hmac_256bit_tag_t::default(); let ret = unsafe { sgx_hmac_sha256_msg( src as *const _ as *const u8, size as i32, key as *const u8, SGX_HMAC256_KEY_SIZE as i32, &mut mac as *mut sgx_hmac_256bit_tag_t as *mut u8, SGX_HMAC256_MAC_SIZE as i32, ) }; match ret { sgx_status_t::SGX_SUCCESS => Ok(mac), _ => Err(ret), } } pub fn rsgx_hmac_sha256_slice<T>(key: &sgx_hmac_256bit_key_t, src: &[T]) -> SgxResult<sgx_hmac_256bit_tag_t> where T: Copy + ContiguousMemory, { let size = mem::size_of_val(src); if size == 0 { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } if size > u32::max_value() as usize { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } let mut mac = sgx_hmac_256bit_tag_t::default(); let ret = unsafe { sgx_hmac_sha256_msg( src.as_ptr() as *const u8, size as i32, key as *const u8, SGX_HMAC256_KEY_SIZE as i32, &mut mac as *mut sgx_hmac_256bit_tag_t as *mut u8, SGX_HMAC256_MAC_SIZE as i32, ) }; match ret { sgx_status_t::SGX_SUCCESS => Ok(mac), _ => Err(ret), } } fn rsgx_hmac256_init(key: &sgx_hmac_256bit_key_t, hmac_handle: &mut sgx_hmac_state_handle_t) -> sgx_status_t { unsafe { sgx_hmac256_init( key as *const sgx_hmac_256bit_key_t as *const u8, SGX_HMAC256_KEY_SIZE as i32, hmac_handle as *mut sgx_hmac_state_handle_t, ) } } fn rsgx_hmac256_update_msg<T>(src: &T, hmac_handle: sgx_hmac_state_handle_t) -> sgx_status_t where T: Copy + ContiguousMemory, { let size = mem::size_of::<T>(); if size == 0 { return sgx_status_t::SGX_ERROR_INVALID_PARAMETER; } if size > u32::max_value() as usize { return sgx_status_t::SGX_ERROR_INVALID_PARAMETER; } unsafe { sgx_hmac256_update(src as *const _ as *const u8, size as i32, hmac_handle) } } fn rsgx_hmac256_update_slice<T>(src: &[T], hmac_handle: sgx_hmac_state_handle_t) -> sgx_status_t where T: Copy + ContiguousMemory, { let size = mem::size_of_val(src); if size == 0 { return sgx_status_t::SGX_ERROR_INVALID_PARAMETER; } if size > u32::max_value() as usize { return sgx_status_t::SGX_ERROR_INVALID_PARAMETER; } unsafe { sgx_hmac256_update(src.as_ptr() as *const _ as *const u8, size as i32, hmac_handle) } } fn rsgx_hmac256_final(hmac_handle: sgx_hmac_state_handle_t, hash: &mut sgx_hmac_256bit_tag_t) -> sgx_status_t { unsafe { sgx_hmac256_final( hash as *mut sgx_hmac_256bit_tag_t as *mut u8, SGX_HMAC256_MAC_SIZE as i32, hmac_handle, ) } } fn rsgx_hmac256_close(hmac_handle: sgx_hmac_state_handle_t) -> sgx_status_t { unsafe { sgx_hmac256_close(hmac_handle) } } pub struct SgxHmacHandle { handle: RefCell<sgx_hmac_state_handle_t>, initflag: Cell<bool>, } impl SgxHmacHandle { pub fn new() -> SgxHmacHandle { SgxHmacHandle{ handle: RefCell::new(ptr::null_mut() as sgx_hmac_state_handle_t), initflag: Cell::new(false), } } pub fn init(&self, key: &sgx_hmac_256bit_key_t) -> SgxError { if self.initflag.get() { return Ok(()); } let ret = rsgx_hmac256_init(key, self.handle.borrow_mut().deref_mut()); match ret { sgx_status_t::SGX_SUCCESS => { self.initflag.set(true); Ok(()) } _ => Err(ret), } } pub fn update_msg<T>(&self, src: &T) -> SgxError where T: Copy + ContiguousMemory, { if !self.initflag.get() { return Err(sgx_status_t::SGX_ERROR_INVALID_STATE); } let ret = 
rsgx_hmac256_update_msg(src, *self.handle.borrow()); match ret { sgx_status_t::SGX_SUCCESS => Ok(()), _ => Err(ret), } } pub fn update_slice<T>(&self, src: &[T]) -> SgxError where T: Copy + ContiguousMemory, { if !self.initflag.get() { return Err(sgx_status_t::SGX_ERROR_INVALID_STATE); } let ret = rsgx_hmac256_update_slice(src, *self.handle.borrow()); match ret { sgx_status_t::SGX_SUCCESS => Ok(()), _ => Err(ret), } } pub fn get_hash(&self) -> SgxResult<sgx_hmac_256bit_tag_t> { if !self.initflag.get() { return Err(sgx_status_t::SGX_ERROR_INVALID_STATE); } let mut hash = sgx_hmac_256bit_tag_t::default(); let ret = rsgx_hmac256_final(*self.handle.borrow(), &mut hash); match ret { sgx_status_t::SGX_SUCCESS => Ok(hash), _ => Err(ret), } } pub fn close(&self) -> SgxError { if !self.initflag.get() { return Ok(()); } let ret = { let handle = *self.handle.borrow(); if handle.is_null() { sgx_status_t::SGX_SUCCESS } else { rsgx_hmac256_close(handle) } }; match ret { sgx_status_t::SGX_SUCCESS => { self.initflag.set(false); *self.handle.borrow_mut() = ptr::null_mut(); Ok(()) } _ => Err(ret), } } } impl Default for SgxHmacHandle { fn default() -> Self { Self::new() } } impl Drop for SgxHmacHandle { fn drop(&mut self) { let _ = self.close(); } } pub const SGX_AESCTR_CTR_SIZE: size_t = 16; pub type sgx_aes_ctr_128bit_ctr_t = [uint8_t; SGX_AESCTR_CTR_SIZE]; /// /// rsgx_aes_ctr_encrypt performs a Rijndael AES-CTR encryption operation. /// /// Only a 128bit key size is supported by this Intel(R) SGX SDK cryptography library. /// /// # Description /// /// This function encrypts the input data stream of a variable length according to /// the CTR mode as specified in [NIST SP 800-38A]. The counter can be thought /// of as an IV which increments on successive encryption or decryption calls. For /// a given dataset or data stream, the incremented counter block should be used /// on successive calls of the encryption process for that given stream. However, /// for new or different datasets/streams, the same counter should not be reused, /// instead initialize the counter for the new data set. /// /// It is recommended that the source, destination and counter data buffers are /// allocated within the enclave. /// /// # Parameters /// /// **key** /// /// A pointer to key to be used in the AES-CTR encryption operation. The size must be 128 bits. /// /// **src** /// /// A pointer to the input data stream to be encrypted. /// /// **ctr** /// /// A pointer to the initialization vector to be used in the AES-CTR calculation. /// /// **ctr_inc_bits** /// /// Specifies the number of bits in the counter to be incremented. /// /// **dst** /// /// A pointer to the output encrypted data buffer. This buffer should be allocated by the calling code. /// /// # Requirements /// /// Library: libsgx_tcrypto.a /// /// # Errors /// /// **SGX_ERROR_INVALID_PARAMETER** /// /// The pointer is invalid. /// /// **SGX_ERROR_OUT_OF_MEMORY** /// /// Not enough memory is available to complete this operation. /// /// **SGX_ERROR_UNEXPECTED** /// /// An internal cryptography library failure occurred. 
/// pub fn rsgx_aes_ctr_encrypt( key: &sgx_aes_ctr_128bit_key_t, src: &[u8], ctr: &sgx_aes_ctr_128bit_ctr_t, ctr_inc_bits: u32, dst: &mut [u8], ) -> SgxError { let src_len = src.len(); if src_len > u32::max_value() as usize { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } if src_len < 1 { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } let dst_len = dst.len(); if dst_len > u32::max_value() as usize { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } if dst_len < src_len { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } let ret = unsafe { sgx_aes_ctr_encrypt( key as *const sgx_aes_ctr_128bit_key_t, src.as_ptr(), src_len as u32, ctr as *const sgx_aes_ctr_128bit_ctr_t as *const u8, ctr_inc_bits, dst.as_mut_ptr(), ) }; match ret { sgx_status_t::SGX_SUCCESS => Ok(()), _ => Err(ret), } } /// /// rsgx_aes_ctr_decrypt performs a Rijndael AES-CTR decryption operation. /// /// Only a 128bit key size is supported by this Intel(R) SGX SDK cryptography library. /// /// # Description /// /// This function decrypts the input data stream of a variable length according to /// the CTR mode as specified in [NIST SP 800-38A]. The counter can be thought /// of as an IV which increments on successive encryption or decryption calls. For /// a given dataset or data stream, the incremented counter block should be used /// on successive calls of the decryption process for that given stream. However, /// for new or different datasets/streams, the same counter should not be reused, /// instead initialize the counter for the new data set. /// /// It is recommended that the source, destination and counter data buffers are /// allocated within the enclave. /// /// # Parameters /// /// **key** /// /// A pointer to key to be used in the AES-CTR encryption operation. The size must be 128 bits. /// /// **src** /// /// A pointer to the input data stream to be decrypted. /// /// **ctr** /// /// A pointer to the initialization vector to be used in the AES-CTR calculation. /// /// **ctr_inc_bits** /// /// Specifies the number of bits in the counter to be incremented. /// /// **dst** /// /// A pointer to the output decrypted data buffer. This buffer should be allocated by the calling code. /// /// # Requirements /// /// Library: libsgx_tcrypto.a /// /// # Errors /// /// **SGX_ERROR_INVALID_PARAMETER** /// /// The pointer is invalid. /// /// **SGX_ERROR_OUT_OF_MEMORY** /// /// Not enough memory is available to complete this operation. /// /// **SGX_ERROR_UNEXPECTED** /// /// An internal cryptography library failure occurred. 
/// pub fn rsgx_aes_ctr_decrypt( key: &sgx_aes_ctr_128bit_key_t, src: &[u8], ctr: &sgx_aes_ctr_128bit_ctr_t, ctr_inc_bits: u32, dst: &mut [u8], ) -> SgxError { let src_len = src.len(); if src_len > u32::max_value() as usize { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } if src_len < 1 { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } let dst_len = dst.len(); if dst_len > u32::max_value() as usize { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } if dst_len < src_len { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } let ret = unsafe { sgx_aes_ctr_decrypt( key as *const sgx_aes_ctr_128bit_key_t, src.as_ptr(), src.len() as u32, ctr as *const sgx_aes_ctr_128bit_ctr_t as *const u8, ctr_inc_bits, dst.as_mut_ptr(), ) }; match ret { sgx_status_t::SGX_SUCCESS => Ok(()), _ => Err(ret), } } fn rsgx_ecc256_open_context(ecc_handle: &mut sgx_ecc_state_handle_t) -> sgx_status_t { unsafe { sgx_ecc256_open_context(ecc_handle as *mut _ as *mut sgx_ecc_state_handle_t) } } fn rsgx_ecc256_close_context(ecc_handle: sgx_ecc_state_handle_t) -> sgx_status_t { unsafe { sgx_ecc256_close_context(ecc_handle) } } fn rsgx_ecc256_create_key_pair( private: &mut sgx_ec256_private_t, public: &mut sgx_ec256_public_t, ecc_handle: sgx_ecc_state_handle_t, ) -> sgx_status_t { unsafe { sgx_ecc256_create_key_pair( private as *mut sgx_ec256_private_t, public as *mut sgx_ec256_public_t, ecc_handle, ) } } fn rsgx_ecc256_check_point(point: &sgx_ec256_public_t, ecc_handle: sgx_ecc_state_handle_t, valid: &mut i32) -> sgx_status_t { unsafe { sgx_ecc256_check_point(point as *const sgx_ec256_public_t, ecc_handle, valid as *mut i32) } } fn rsgx_ecc256_compute_shared_dhkey( private_b: &sgx_ec256_private_t, public_ga: &sgx_ec256_public_t, shared_key: &mut sgx_ec256_dh_shared_t, ecc_handle: sgx_ecc_state_handle_t, ) -> sgx_status_t { unsafe { sgx_ecc256_compute_shared_dhkey( private_b as *const _ as *mut sgx_ec256_private_t, public_ga as *const _ as *mut sgx_ec256_public_t, shared_key as *mut sgx_ec256_dh_shared_t, ecc_handle, ) } } /* delete (intel sgx sdk 2.0) fn rsgx_ecc256_compute_shared_dhkey512( private_b: &sgx_ec256_private_t, public_ga: &sgx_ec256_public_t, shared_key: &mut sgx_ec256_dh_shared512_t, ecc_handle: sgx_ecc_state_handle_t, ) -> sgx_status_t { unsafe { sgx_ecc256_compute_shared_dhkey512( private_b as *const _ as *mut sgx_ec256_private_t, public_ga as *const _ as *mut sgx_ec256_public_t, shared_key as *mut sgx_ec256_dh_shared512_t, ecc_handle, ) } } */ fn rsgx_ecdsa_sign_msg<T>( data: &T, private: &sgx_ec256_private_t, signature: &mut sgx_ec256_signature_t, ecc_handle: sgx_ecc_state_handle_t, ) -> sgx_status_t where T: Copy + ContiguousMemory, { let size = mem::size_of::<T>(); if size == 0 { return sgx_status_t::SGX_ERROR_INVALID_PARAMETER; } if size > u32::max_value() as usize { return sgx_status_t::SGX_ERROR_INVALID_PARAMETER; } unsafe { sgx_ecdsa_sign(data as *const _ as *const u8, size as u32, private as *const _ as *mut sgx_ec256_private_t, signature as *mut sgx_ec256_signature_t, ecc_handle) } } fn rsgx_ecdsa_sign_slice<T>( data: &[T], private: &sgx_ec256_private_t, signature: &mut sgx_ec256_signature_t, ecc_handle: sgx_ecc_state_handle_t, ) -> sgx_status_t where T: Copy + ContiguousMemory, { let size = mem::size_of_val(data); if size == 0 { return sgx_status_t::SGX_ERROR_INVALID_PARAMETER; } if size > u32::max_value() as usize { return sgx_status_t::SGX_ERROR_INVALID_PARAMETER; } unsafe { sgx_ecdsa_sign( data.as_ptr() as *const _ as *const u8, size as u32, private as *const _ as 
            *mut sgx_ec256_private_t,
            signature as *mut sgx_ec256_signature_t,
            ecc_handle,
        )
    }
}

fn rsgx_ecdsa_verify_msg<T>(
    data: &T,
    public: &sgx_ec256_public_t,
    signature: &sgx_ec256_signature_t,
    result: &mut sgx_generic_ecresult_t,
    ecc_handle: sgx_ecc_state_handle_t,
) -> sgx_status_t
where
    T: Copy + ContiguousMemory,
{
    let size = mem::size_of::<T>();
    if size == 0 {
        return sgx_status_t::SGX_ERROR_INVALID_PARAMETER;
    }
    if size > u32::max_value() as usize {
        return sgx_status_t::SGX_ERROR_INVALID_PARAMETER;
    }

    unsafe {
        let mut verify: u8 = 0;
        let ret = sgx_ecdsa_verify(
            data as *const _ as *const u8,
            size as u32,
            public as *const sgx_ec256_public_t,
            signature as *const _ as *mut sgx_ec256_signature_t,
            &mut verify as *mut u8,
            ecc_handle,
        );
        match ret {
            sgx_status_t::SGX_SUCCESS => {
                let ecresult = sgx_generic_ecresult_t::from_repr(u32::from(verify));
                *result = ecresult.unwrap_or(sgx_generic_ecresult_t::SGX_EC_INVALID_SIGNATURE);
            }
            _ => {
                *result = sgx_generic_ecresult_t::SGX_EC_INVALID_SIGNATURE;
            }
        };
        ret
    }
}

fn rsgx_ecdsa_verify_slice<T>(
    data: &[T],
    public: &sgx_ec256_public_t,
    signature: &sgx_ec256_signature_t,
    result: &mut sgx_generic_ecresult_t,
    ecc_handle: sgx_ecc_state_handle_t,
) -> sgx_status_t
where
    T: Copy + ContiguousMemory,
{
    let size = mem::size_of_val(data);
    if size == 0 {
        return sgx_status_t::SGX_ERROR_INVALID_PARAMETER;
    }
    if size > u32::max_value() as usize {
        return sgx_status_t::SGX_ERROR_INVALID_PARAMETER;
    }

    unsafe {
        let mut verify: u8 = 0;
        let ret = sgx_ecdsa_verify(
            data.as_ptr() as *const _ as *const u8,
            size as u32,
            public as *const sgx_ec256_public_t,
            signature as *const _ as *mut sgx_ec256_signature_t,
            &mut verify as *mut u8,
            ecc_handle,
        );
        match ret {
            sgx_status_t::SGX_SUCCESS => {
                let ecresult = sgx_generic_ecresult_t::from_repr(u32::from(verify));
                *result = ecresult.unwrap_or(sgx_generic_ecresult_t::SGX_EC_INVALID_SIGNATURE);
            }
            _ => {
                *result = sgx_generic_ecresult_t::SGX_EC_INVALID_SIGNATURE;
            }
        };
        ret
    }
}

fn rsgx_ecdsa_verify_hash(
    hash: &sgx_sha256_hash_t,
    public: &sgx_ec256_public_t,
    signature: &sgx_ec256_signature_t,
    result: &mut sgx_generic_ecresult_t,
    ecc_handle: sgx_ecc_state_handle_t,
) -> sgx_status_t {
    unsafe {
        let mut verify: u8 = 0;
        let ret = sgx_ecdsa_verify_hash(
            hash as *const sgx_sha256_hash_t as *const u8,
            public as *const sgx_ec256_public_t,
            signature as *const _ as *mut sgx_ec256_signature_t,
            &mut verify as *mut u8,
            ecc_handle,
        );
        match ret {
            sgx_status_t::SGX_SUCCESS => {
                let ecresult = sgx_generic_ecresult_t::from_repr(u32::from(verify));
                *result = ecresult.unwrap_or(sgx_generic_ecresult_t::SGX_EC_INVALID_SIGNATURE);
            }
            _ => {
                *result = sgx_generic_ecresult_t::SGX_EC_INVALID_SIGNATURE;
            }
        };
        ret
    }
}

///
/// ECC GF(p) context state.
///
/// This is a handle to the ECC GF(p) context state, allocated and initialized, that is
/// used to perform standard elliptic curve cryptosystem functions. The algorithm stores
/// the intermediate results of calculations performed using this context.
///
pub struct SgxEccHandle {
    handle: RefCell<sgx_ecc_state_handle_t>,
    initflag: Cell<bool>,
}

impl SgxEccHandle {
    ///
    /// Constructs a new, empty SgxEccHandle.
    ///
    pub fn new() -> SgxEccHandle {
        SgxEccHandle {
            handle: RefCell::new(ptr::null_mut() as sgx_ecc_state_handle_t),
            initflag: Cell::new(false),
        }
    }

    ///
    /// open returns an allocated and initialized context for the elliptic curve cryptosystem
    /// over a prime finite field, GF(p).
    ///
    /// This context must be created prior to calling create_key_pair or compute_shared_dhkey.
    /// When the calling code has completed its set of ECC operations, close should be called to
    /// clean up and deallocate the ECC context.
    ///
    /// # Description
    ///
    /// open allocates and initializes a context for a 256-bit GF(p) elliptic curve
    /// cryptographic system. The caller does not allocate memory for the ECC
    /// state that this function returns. The state is specific to the implementation of
    /// the cryptography library, so the allocation is performed by the library
    /// itself. Once the ECC operations using this cryptographic system are completed,
    /// or if any error occurs, close should be called to free the state allocated by this function.
    ///
    /// Public key cryptography solves information security problems by enabling
    /// trusted communication over insecure channels. Although elliptic curves are
    /// well studied as a branch of mathematics, interest in cryptographic schemes
    /// based on elliptic curves keeps rising because of the advantages elliptic
    /// curve algorithms offer in wireless communications: shorter processing time
    /// and shorter keys.
    ///
    /// Elliptic curve cryptosystems (ECCs) implement a different way of creating public
    /// keys. Because elliptic curve calculation is based on the addition of rational
    /// points in the (x,y) plane and it is difficult to solve a discrete logarithm from
    /// these points, a higher level of safety is achieved through the cryptographic
    /// schemes that use elliptic curves. Cryptographic systems that encrypt
    /// messages by using the properties of elliptic curves are hard to attack due to
    /// the extreme complexity of deciphering the private key.
    ///
    /// Elliptic curves allow shorter public keys and encourage cryptographers to
    /// create cryptosystems with the same or higher encryption strength than the
    /// RSA or DSA cryptosystems. Because of the relatively short key length, ECCs
    /// also encrypt and decrypt faster and require less computation on the hardware.
    ///
    /// # Requirements
    ///
    /// Library: libsgx_tcrypto.a
    ///
    /// # Errors
    ///
    /// **SGX_ERROR_INVALID_PARAMETER**
    ///
    /// The pointer is invalid.
    ///
    /// **SGX_ERROR_OUT_OF_MEMORY**
    ///
    /// Not enough memory is available to complete this operation.
    ///
    /// **SGX_ERROR_UNEXPECTED**
    ///
    /// The ECC context state was not initialized properly due to an internal cryptography library failure.
    ///
    pub fn open(&self) -> SgxError {
        if self.initflag.get() {
            return Ok(());
        }

        let ret = rsgx_ecc256_open_context(self.handle.borrow_mut().deref_mut());
        match ret {
            sgx_status_t::SGX_SUCCESS => {
                self.initflag.set(true);
                Ok(())
            }
            _ => Err(ret),
        }
    }

    ///
    /// create_key_pair generates a private/public key pair on the ECC curve for the given
    /// cryptographic system.
    ///
    /// open must be called to allocate and initialize the ECC context prior to making this call.
    ///
    /// # Description
    ///
    /// This function populates a private/public key pair. The calling code allocates
    /// memory for the private and public key pointers to be populated. The function
    /// generates a private key p_private and computes a public key p_public of
    /// the elliptic curve cryptosystem over a finite field GF(p).
    ///
    /// The private key p_private is a number that lies in the range of [1, n-1]
    /// where n is the order of the elliptic curve base point.
    /// The public key p_public is an elliptic curve point such that p_public =
    /// p_private * G, where G is the base point of the elliptic curve.
    /// The context of the point p_public as an elliptic curve point must be created
    /// by using the function open.
    ///
    /// # Requirements
    ///
    /// Library: libsgx_tcrypto.a
    ///
    /// # Return value
    ///
    /// **sgx_ec256_private_t**
    ///
    /// The private key, which is a number that lies in the range of [1, n-1] where n is the order
    /// of the elliptic curve base point.
    ///
    /// **sgx_ec256_public_t**
    ///
    /// The public key, which is an elliptic curve point such that:
    ///
    /// public key = private key * G, where G is the base point of the elliptic curve.
    ///
    /// # Errors
    ///
    /// **SGX_ERROR_INVALID_PARAMETER**
    ///
    /// The pointer is invalid.
    ///
    /// **SGX_ERROR_INVALID_STATE**
    ///
    /// The ECC state is not initialized.
    ///
    /// **SGX_ERROR_OUT_OF_MEMORY**
    ///
    /// Not enough memory is available to complete this operation.
    ///
    /// **SGX_ERROR_UNEXPECTED**
    ///
    /// The key creation process failed due to an internal cryptography library failure.
    ///
    pub fn create_key_pair(&self) -> SgxResult<(sgx_ec256_private_t, sgx_ec256_public_t)> {
        if !self.initflag.get() {
            return Err(sgx_status_t::SGX_ERROR_INVALID_STATE);
        }

        let mut private = sgx_ec256_private_t::default();
        let mut public = sgx_ec256_public_t::default();
        let ret = rsgx_ecc256_create_key_pair(&mut private, &mut public, *self.handle.borrow());
        match ret {
            sgx_status_t::SGX_SUCCESS => Ok((private, public)),
            _ => Err(ret),
        }
    }

    ///
    /// check_point checks whether the input point is a valid point on the ECC curve for the given cryptographic system.
    ///
    /// open must be called to allocate and initialize the ECC context prior to making this call.
    ///
    /// # Description
    ///
    /// check_point validates whether the input point is a valid point on the ECC curve for the given cryptographic system.
    ///
    /// # Parameters
    ///
    /// **point**
    ///
    /// A pointer to the point to perform validity check on.
    ///
    /// # Requirements
    ///
    /// Library: libsgx_tcrypto.a
    ///
    /// # Return value
    ///
    /// **true**
    ///
    /// The input point is valid.
    ///
    /// **false**
    ///
    /// The input point is not valid.
    ///
    /// # Errors
    ///
    /// **SGX_ERROR_INVALID_PARAMETER**
    ///
    /// The pointer is invalid.
    ///
    /// **SGX_ERROR_INVALID_STATE**
    ///
    /// The ECC state is not initialized.
    ///
    /// **SGX_ERROR_OUT_OF_MEMORY**
    ///
    /// Not enough memory is available to complete this operation.
    ///
    /// **SGX_ERROR_UNEXPECTED**
    ///
    /// An internal cryptography library failure occurred.
    ///
    pub fn check_point(&self, point: &sgx_ec256_public_t) -> SgxResult<bool> {
        if !self.initflag.get() {
            return Err(sgx_status_t::SGX_ERROR_INVALID_STATE);
        }

        let mut valid: i32 = 0;
        let ret = rsgx_ecc256_check_point(point, *self.handle.borrow(), &mut valid);
        match ret {
            sgx_status_t::SGX_SUCCESS => Ok(valid > 0),
            _ => Err(ret),
        }
    }

    ///
    /// compute_shared_dhkey generates a secret key shared between two participants of the cryptosystem.
    ///
    /// # Description
    ///
    /// This function computes the Diffie-Hellman shared key based on the enclave's
    /// own (local) private key and the remote enclave's public key Ga.
    ///
    /// The function computes a secret number sharedKey, which is a secret key
    /// shared between two participants of the cryptosystem.
    ///
    /// In cryptography, metasyntactic names such as Alice and Bob are normally used
    /// as examples and in discussions and stand for participant A and participant B.
    ///
    /// Both participants (Alice and Bob) use the cryptosystem for receiving a common
    /// secret point on the elliptic curve called a secret key (sharedKey).
    /// To receive a secret key, participants apply the Diffie-Hellman key-agreement
    /// scheme involving public key exchange. The value of the secret key entirely
    /// depends on the participants.
    ///
    /// According to the scheme, Alice and Bob perform the following operations:
    ///
    /// 1. Alice calculates her own public key pubKeyA by using her private key
    /// privKeyA: pubKeyA = privKeyA * G, where G is the base point of the
    /// elliptic curve.
    ///
    /// 2. Alice passes the public key to Bob.
    ///
    /// 3. Bob calculates his own public key pubKeyB by using his private key
    /// privKeyB: pubKeyB = privKeyB * G, where G is the base point of the elliptic curve.
    ///
    /// 4. Bob passes the public key to Alice.
    ///
    /// 5. Alice gets Bob's public key and calculates the secret point shareKeyA. When
    /// calculating, she uses her own private key and Bob's public key and applies the
    /// following formula:
    ///
    /// shareKeyA = privKeyA * pubKeyB = privKeyA * privKeyB * G.
    ///
    /// 6. Bob gets Alice's public key and calculates the secret point shareKeyB. When
    /// calculating, he uses his own private key and Alice's public key and applies the
    /// following formula:
    ///
    /// shareKeyB = privKeyB * pubKeyA = privKeyB * privKeyA * G.
    ///
    /// Since privKeyA * privKeyB * G = privKeyB * privKeyA * G, the result of both
    /// calculations is the same, that is, the equation shareKeyA = shareKeyB is true.
    /// The secret point serves as a secret key.
    ///
    /// The shared secret shareKey is an x-coordinate of the secret point on the elliptic
    /// curve. The elliptic curve domain parameters must be defined beforehand by the
    /// function: open.
    ///
    /// # Parameters
    ///
    /// **private_b**
    ///
    /// A pointer to the local private key.
    ///
    /// **public_ga**
    ///
    /// A pointer to the remote public key.
    ///
    /// # Requirements
    ///
    /// Library: libsgx_tcrypto.a
    ///
    /// # Return value
    ///
    /// The secret key generated by this function, which is a common point on the elliptic curve.
    ///
    /// # Errors
    ///
    /// **SGX_ERROR_INVALID_PARAMETER**
    ///
    /// The pointer is invalid.
    ///
    /// **SGX_ERROR_INVALID_STATE**
    ///
    /// The ECC state is not initialized.
    ///
    /// **SGX_ERROR_OUT_OF_MEMORY**
    ///
    /// Not enough memory is available to complete this operation.
    ///
    /// **SGX_ERROR_UNEXPECTED**
    ///
    /// The key creation process failed due to an internal cryptography library failure.
    ///
    pub fn compute_shared_dhkey(&self, private_b: &sgx_ec256_private_t, public_ga: &sgx_ec256_public_t) -> SgxResult<sgx_ec256_dh_shared_t> {
        if !self.initflag.get() {
            return Err(sgx_status_t::SGX_ERROR_INVALID_STATE);
        }

        let mut shared_key = sgx_ec256_dh_shared_t::default();
        let ret = rsgx_ecc256_compute_shared_dhkey(private_b, public_ga, &mut shared_key, *self.handle.borrow());
        match ret {
            sgx_status_t::SGX_SUCCESS => Ok(shared_key),
            _ => Err(ret),
        }
    }

    /* delete (intel sgx sdk 2.0)
    pub fn compute_shared_dhkey512(&self, private_b: &sgx_ec256_private_t, public_ga: &sgx_ec256_public_t) -> SgxResult<sgx_ec256_dh_shared512_t> {
        if self.initflag.get() == false {
            return Err(sgx_status_t::SGX_ERROR_INVALID_STATE);
        }

        let mut shared_key = sgx_ec256_dh_shared512_t::default();
        let ret = rsgx_ecc256_compute_shared_dhkey512(private_b, public_ga, &mut shared_key, *self.handle.borrow());
        match ret {
            sgx_status_t::SGX_SUCCESS => Ok(shared_key),
            _ => Err(ret),
        }
    }
    */

    ///
    /// ecdsa_sign_msg computes a digital signature with a given private key over an input dataset.
    ///
    /// # Description
    ///
    /// This function computes a digital signature over the input dataset based on the
    /// input private key.
    ///
    /// A message digest is a fixed-size number derived from the original message
    /// with an applied hash function over the binary code of the message. (SHA256
    /// in this case)
    ///
    /// The signer's private key and the message digest are used to create a signature.
    ///
    /// A digital signature over a message consists of a pair of large numbers, 256-bits
    /// each, which the given function computes.
    ///
    /// The scheme used for computing a digital signature is the ECDSA scheme, an
    /// elliptic curve variant of the DSA scheme.
    ///
    /// The keys can be generated and set up by the function: create_key_pair.
    ///
    /// The elliptic curve domain parameters must be created by function: open.
    ///
    /// # Parameters
    ///
    /// **data**
    ///
    /// A pointer to the data to calculate the signature over.
    ///
    /// **private**
    ///
    /// A pointer to the private key to be used in the calculation of the signature.
    ///
    /// # Requirements
    ///
    /// Library: libsgx_tcrypto.a
    ///
    /// # Return value
    ///
    /// The signature generated by this function.
    ///
    /// # Errors
    ///
    /// **SGX_ERROR_INVALID_PARAMETER**
    ///
    /// The pointer is invalid.
    ///
    /// **SGX_ERROR_INVALID_STATE**
    ///
    /// The ECC state is not initialized.
    ///
    /// **SGX_ERROR_OUT_OF_MEMORY**
    ///
    /// Not enough memory is available to complete this operation.
    ///
    /// **SGX_ERROR_UNEXPECTED**
    ///
    /// The signature generation process failed due to an internal cryptography library failure.
    ///
    pub fn ecdsa_sign_msg<T>(&self, data: &T, private: &sgx_ec256_private_t) -> SgxResult<sgx_ec256_signature_t>
    where
        T: Copy + ContiguousMemory,
    {
        if !self.initflag.get() {
            return Err(sgx_status_t::SGX_ERROR_INVALID_STATE);
        }

        let mut signature = sgx_ec256_signature_t::default();
        let ret = rsgx_ecdsa_sign_msg(data, private, &mut signature, *self.handle.borrow());
        match ret {
            sgx_status_t::SGX_SUCCESS => Ok(signature),
            _ => Err(ret),
        }
    }

    ///
    /// ecdsa_sign_slice computes a digital signature with a given private key over an input dataset.
    ///
    pub fn ecdsa_sign_slice<T>(&self, data: &[T], private: &sgx_ec256_private_t) -> SgxResult<sgx_ec256_signature_t>
    where
        T: Copy + ContiguousMemory,
    {
        if !self.initflag.get() {
            return Err(sgx_status_t::SGX_ERROR_INVALID_STATE);
        }

        let mut signature = sgx_ec256_signature_t::default();
        let ret = rsgx_ecdsa_sign_slice(data, private, &mut signature, *self.handle.borrow());
        match ret {
            sgx_status_t::SGX_SUCCESS => Ok(signature),
            _ => Err(ret),
        }
    }

    ///
    /// ecdsa_verify_msg verifies the input digital signature with a given public key over an input dataset.
    ///
    /// # Description
    ///
    /// This function verifies the signature for the given dataset based on the input public key.
    ///
    /// A digital signature over a message consists of a pair of large numbers, 256-bits
    /// each, which could be created by function: sgx_ecdsa_sign. The scheme
    /// used for computing a digital signature is the ECDSA scheme, an elliptic
    /// curve variant of the DSA scheme.
    ///
    /// The elliptic curve domain parameters must be created by function: open.
    ///
    /// # Parameters
    ///
    /// **data**
    ///
    /// A pointer to the signed dataset to verify.
    ///
    /// **public**
    ///
    /// A pointer to the public key to be used in the calculation of the signature.
    ///
    /// **signature**
    ///
    /// A pointer to the signature to be verified.
    ///
    /// # Requirements
    ///
    /// Library: libsgx_tcrypto.a
    ///
    /// # Return value
    ///
    /// **true**
    ///
    /// Digital signature is valid.
/// /// **false** /// /// Digital signature is not valid. /// /// # Errors /// /// **SGX_ERROR_INVALID_PARAMETER** /// /// The pointer is invalid. /// /// **SGX_ERROR_INVALID_STATE** /// /// The ECC state is not initialized. /// /// **SGX_ERROR_OUT_OF_MEMORY** /// /// Not enough memory is available to complete this operation. /// /// **SGX_ERROR_UNEXPECTED** /// /// The verification process failed due to an internal cryptography library failure. /// pub fn ecdsa_verify_msg<T>( &self, data: &T, public: &sgx_ec256_public_t, signature: &sgx_ec256_signature_t, ) -> SgxResult<bool> where T: Copy + ContiguousMemory, { if !self.initflag.get() { return Err(sgx_status_t::SGX_ERROR_INVALID_STATE); } let mut result = sgx_generic_ecresult_t::default(); let ret = rsgx_ecdsa_verify_msg(data, public, signature, &mut result, *self.handle.borrow()); match ret { sgx_status_t::SGX_SUCCESS => { match result { sgx_generic_ecresult_t::SGX_EC_VALID => Ok(true), _ => Ok(false), } } _ => Err(ret), } } /// /// ecdsa_verify_slice verifies the input digital signature with a given public key over an input dataset. /// pub fn ecdsa_verify_slice<T>( &self, data: &[T], public: &sgx_ec256_public_t, signature: &sgx_ec256_signature_t, ) -> SgxResult<bool> where T: Copy + ContiguousMemory, { if !self.initflag.get() { return Err(sgx_status_t::SGX_ERROR_INVALID_STATE); } let mut result = sgx_generic_ecresult_t::default(); let ret = rsgx_ecdsa_verify_slice(data, public, signature, &mut result, *self.handle.borrow()); match ret { sgx_status_t::SGX_SUCCESS => { match result { sgx_generic_ecresult_t::SGX_EC_VALID => Ok(true), _ => Ok(false), } } _ => Err(ret), } } pub fn ecdsa_verify_hash( &self, hash: &sgx_sha256_hash_t, public: &sgx_ec256_public_t, signature: &sgx_ec256_signature_t, ) -> SgxResult<bool> { if !self.initflag.get() { return Err(sgx_status_t::SGX_ERROR_INVALID_STATE); } let mut result = sgx_generic_ecresult_t::default(); let ret = rsgx_ecdsa_verify_hash(hash, public, signature, &mut result, *self.handle.borrow()); match ret { sgx_status_t::SGX_SUCCESS => { match result { sgx_generic_ecresult_t::SGX_EC_VALID => Ok(true), _ => Ok(false), } } _ => Err(ret), } } /// /// close cleans up and deallocates the ECC 256 GF(p) state that was allocated in function open. /// /// # Description /// /// close is used by calling code to deallocate memory used for storing the ECC 256 GF(p) state used /// in ECC cryptographic calculations. /// /// # Requirements /// /// Library: libsgx_tcrypto.a /// /// # Errors /// /// **SGX_ERROR_INVALID_PARAMETER** /// /// The input handle is invalid. /// pub fn close(&self) -> SgxError { if !self.initflag.get() { return Ok(()); } let ret = { let handle = *self.handle.borrow(); if handle.is_null() { sgx_status_t::SGX_SUCCESS } else { rsgx_ecc256_close_context(handle) } }; match ret { sgx_status_t::SGX_SUCCESS => { self.initflag.set(false); *self.handle.borrow_mut() = ptr::null_mut(); Ok(()) } _ => Err(ret), } } } impl Default for SgxEccHandle { fn default() -> Self { Self::new() } } impl Drop for SgxEccHandle { /// /// close cleans up and deallocates the ECC 256 GF(p) state that was allocated in function open. /// fn drop(&mut self) { let _ = self.close(); } } /// /// The rsgx_rsa3072_sign_msg computes a digital signature for a given dataset based on RSA 3072 private key. /// /// # Description /// /// This function computes a digital signature over the input dataset based on the RSA 3072 private key. 
///
/// A message digest is a fixed-size number derived from the original message with an applied hash function
/// over the binary code of the message. (SHA256 in this case)
///
/// The signer's private key and the message digest are used to create a signature.
///
/// The scheme used for computing a digital signature is the RSASSA-PKCS1-v1_5 scheme.
///
/// # Parameters
///
/// **data**
///
/// A pointer to the data to calculate the signature over.
///
/// **key**
///
/// A pointer to the RSA key.
///
/// # Requirements
///
/// Library: libsgx_tcrypto.a
///
/// # Return value
///
/// The signature generated by this function.
///
/// # Errors
///
/// **SGX_ERROR_INVALID_PARAMETER**
///
/// The RSA key or data pointer is NULL, or the data size is 0.
///
/// **SGX_ERROR_OUT_OF_MEMORY**
///
/// Not enough memory is available to complete this operation.
///
/// **SGX_ERROR_UNEXPECTED**
///
/// The signature generation process failed due to an internal cryptography library failure.
///
pub fn rsgx_rsa3072_sign_msg<T>(data: &T, key: &sgx_rsa3072_key_t) -> SgxResult<sgx_rsa3072_signature_t>
where
    T: Copy + ContiguousMemory,
{
    let size = mem::size_of::<T>();
    if size == 0 {
        return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER);
    }
    if size > u32::max_value() as usize {
        return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER);
    }

    let mut sign = sgx_rsa3072_signature_t::default();
    let ret = unsafe {
        sgx_rsa3072_sign(
            data as *const _ as *const u8,
            size as u32,
            key as *const sgx_rsa3072_key_t,
            &mut sign as *mut sgx_rsa3072_signature_t,
        )
    };
    match ret {
        sgx_status_t::SGX_SUCCESS => Ok(sign),
        _ => Err(ret),
    }
}

///
/// rsgx_rsa3072_sign_slice computes a digital signature for a given dataset based on an RSA 3072 private key.
///
pub fn rsgx_rsa3072_sign_slice<T>(data: &[T], key: &sgx_rsa3072_key_t) -> SgxResult<sgx_rsa3072_signature_t>
where
    T: Copy + ContiguousMemory,
{
    let size = mem::size_of_val(data);
    if size == 0 {
        return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER);
    }
    if size > u32::max_value() as usize {
        return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER);
    }

    let mut sign = sgx_rsa3072_signature_t::default();
    let ret = unsafe {
        sgx_rsa3072_sign(
            data.as_ptr() as *const _ as *const u8,
            size as u32,
            key as *const sgx_rsa3072_key_t,
            &mut sign as *mut sgx_rsa3072_signature_t,
        )
    };
    match ret {
        sgx_status_t::SGX_SUCCESS => Ok(sign),
        _ => Err(ret),
    }
}

///
/// rsgx_rsa3072_verify_msg verifies the input digital signature for the given dataset based on the RSA 3072 public key.
///
/// # Description
///
/// This function verifies the signature for the given dataset based on the input RSA 3072 public key.
///
/// A digital signature over a message is a buffer of 384 bytes, which could be created by function: rsgx_rsa3072_sign_msg.
/// The scheme used for computing a digital signature is the RSASSA-PKCS1-v1_5 scheme.
///
/// # Parameters
///
/// **data**
///
/// A pointer to the signed dataset to be verified.
///
/// **public**
///
/// A pointer to the public key to be used in the calculation of the signature.
///
/// **signature**
///
/// A pointer to the signature to be verified.
///
/// # Requirements
///
/// Library: libsgx_tcrypto.a
///
/// # Return value
///
/// **true**
///
/// Digital signature is valid.
///
/// **false**
///
/// Digital signature is not valid.
///
/// # Errors
///
/// **SGX_ERROR_INVALID_PARAMETER**
///
/// The public key or data pointer is NULL, or the data size is 0.
///
/// **SGX_ERROR_OUT_OF_MEMORY**
///
/// Not enough memory is available to complete this operation.
///
/// **SGX_ERROR_UNEXPECTED**
///
/// The verification process failed due to an internal cryptography library failure.
///
pub fn rsgx_rsa3072_verify_msg<T>(
    data: &T,
    public: &sgx_rsa3072_public_key_t,
    signature: &sgx_rsa3072_signature_t,
) -> SgxResult<bool>
where
    T: Copy + ContiguousMemory,
{
    let size = mem::size_of::<T>();
    if size == 0 {
        return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER);
    }
    if size > u32::max_value() as usize {
        return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER);
    }

    unsafe {
        let mut verify = sgx_rsa_result_t::SGX_RSA_INVALID_SIGNATURE;
        let ret = sgx_rsa3072_verify(
            data as *const _ as *const u8,
            size as u32,
            public as *const sgx_rsa3072_public_key_t,
            signature as *const sgx_rsa3072_signature_t,
            &mut verify as *mut sgx_rsa_result_t,
        );
        match ret {
            sgx_status_t::SGX_SUCCESS => {
                match verify {
                    sgx_rsa_result_t::SGX_RSA_VALID => Ok(true),
                    _ => Ok(false),
                }
            }
            _ => Err(ret),
        }
    }
}

///
/// rsgx_rsa3072_verify_slice verifies the input digital signature for the given dataset based on the RSA 3072 public key.
///
pub fn rsgx_rsa3072_verify_slice<T>(
    data: &[T],
    public: &sgx_rsa3072_public_key_t,
    signature: &sgx_rsa3072_signature_t,
) -> SgxResult<bool>
where
    T: Copy + ContiguousMemory,
{
    let size = mem::size_of_val(data);
    if size == 0 {
        return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER);
    }
    if size > u32::max_value() as usize {
        return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER);
    }

    unsafe {
        let mut verify = sgx_rsa_result_t::SGX_RSA_INVALID_SIGNATURE;
        let ret = sgx_rsa3072_verify(
            data.as_ptr() as *const _ as *const u8,
            size as u32,
            public as *const sgx_rsa3072_public_key_t,
            signature as *const sgx_rsa3072_signature_t,
            &mut verify as *mut sgx_rsa_result_t,
        );
        match ret {
            sgx_status_t::SGX_SUCCESS => {
                match verify {
                    sgx_rsa_result_t::SGX_RSA_VALID => Ok(true),
                    _ => Ok(false),
                }
            }
            _ => Err(ret),
        }
    }
}

#[allow(clippy::many_single_char_names)]
pub fn rsgx_create_rsa_key_pair(
    n_byte_size: i32,
    e_byte_size: i32,
    n: &mut [u8],
    d: &mut [u8],
    e: &mut [u8],
    p: &mut [u8],
    q: &mut [u8],
    dmp1: &mut [u8],
    dmq1: &mut [u8],
    iqmp: &mut [u8],
) -> SgxError {
    if (n_byte_size <= 0) || (e_byte_size <= 0) {
        return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER);
    }
    if (n.is_empty()) || (n.len() > i32::max_value() as usize) {
        return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER);
    }
    if (d.is_empty()) || (d.len() > i32::max_value() as usize) {
        return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER);
    }
    if (e.is_empty()) || (e.len() > i32::max_value() as usize) {
        return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER);
    }
    if (p.is_empty()) || (p.len() > i32::max_value() as usize) {
        return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER);
    }
    if (q.is_empty()) || (q.len() > i32::max_value() as usize) {
        return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER);
    }
    if (dmp1.is_empty()) || (dmp1.len() > i32::max_value() as usize) {
        return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER);
    }
    if (dmq1.is_empty()) || (dmq1.len() > i32::max_value() as usize) {
        return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER);
    }
    if (iqmp.is_empty()) || (iqmp.len() > i32::max_value() as usize) {
        return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER);
    }

    let ret = unsafe {
        sgx_create_rsa_key_pair(
            n_byte_size,
            e_byte_size,
            n.as_mut_ptr(),
            d.as_mut_ptr(),
            e.as_mut_ptr(),
            p.as_mut_ptr(),
            q.as_mut_ptr(),
            dmp1.as_mut_ptr(),
            dmq1.as_mut_ptr(),
            iqmp.as_mut_ptr(),
        )
    };
    match ret {
        sgx_status_t::SGX_SUCCESS => Ok(()),
        _ => Err(ret),
    }
}

fn rsgx_create_rsa_priv2_key(
    mod_size: i32,
    exp_size: i32,
    e: &[u8],
    p: &[u8],
    q: &[u8],
    dmp1: &[u8],
    dmq1: &[u8],
    iqmp: &[u8],
    new_pri_key: &mut sgx_rsa_key_t,
) -> sgx_status_t {
    if (mod_size <= 0) || (exp_size <= 0) {
        return sgx_status_t::SGX_ERROR_INVALID_PARAMETER;
    }
    if (e.is_empty()) || (e.len() > i32::max_value() as usize) {
        return sgx_status_t::SGX_ERROR_INVALID_PARAMETER;
    }
    if (p.is_empty()) || (p.len() > i32::max_value() as usize) {
        return sgx_status_t::SGX_ERROR_INVALID_PARAMETER;
    }
    if (q.is_empty()) || (q.len() > i32::max_value() as usize) {
        return sgx_status_t::SGX_ERROR_INVALID_PARAMETER;
    }
    if (dmp1.is_empty()) || (dmp1.len() > i32::max_value() as usize) {
        return sgx_status_t::SGX_ERROR_INVALID_PARAMETER;
    }
    if (dmq1.is_empty()) || (dmq1.len() > i32::max_value() as usize) {
        return sgx_status_t::SGX_ERROR_INVALID_PARAMETER;
    }
    if (iqmp.is_empty()) || (iqmp.len() > i32::max_value() as usize) {
        return sgx_status_t::SGX_ERROR_INVALID_PARAMETER;
    }

    unsafe {
        sgx_create_rsa_priv2_key(
            mod_size,
            exp_size,
            e.as_ptr(),
            p.as_ptr(),
            q.as_ptr(),
            dmp1.as_ptr(),
            dmq1.as_ptr(),
            iqmp.as_ptr(),
            new_pri_key as *mut sgx_rsa_key_t,
        )
    }
}

fn rsgx_create_rsa_priv1_key(
    n_size: i32,
    e_size: i32,
    d_size: i32,
    n: &[u8],
    e: &[u8],
    d: &[u8],
    new_pri_key: &mut sgx_rsa_key_t,
) -> sgx_status_t {
    if (n_size <= 0) || (e_size <= 0) || (d_size <= 0) {
        return sgx_status_t::SGX_ERROR_INVALID_PARAMETER;
    }
    if (n.is_empty()) || (n.len() > i32::max_value() as usize) {
        return sgx_status_t::SGX_ERROR_INVALID_PARAMETER;
    }
    if (e.is_empty()) || (e.len() > i32::max_value() as usize) {
        return sgx_status_t::SGX_ERROR_INVALID_PARAMETER;
    }
    if (d.is_empty()) || (d.len() > i32::max_value() as usize) {
        return sgx_status_t::SGX_ERROR_INVALID_PARAMETER;
    }

    unsafe {
        sgx_create_rsa_priv1_key(
            n_size,
            e_size,
            d_size,
            n.as_ptr(),
            e.as_ptr(),
            d.as_ptr(),
            new_pri_key as *mut sgx_rsa_key_t,
        )
    }
}

fn rsgx_create_rsa_pub1_key(
    mod_size: i32,
    exp_size: i32,
    n: &[u8],
    e: &[u8],
    new_pub_key: &mut sgx_rsa_key_t,
) -> sgx_status_t {
    if (mod_size <= 0) || (exp_size <= 0) {
        return sgx_status_t::SGX_ERROR_INVALID_PARAMETER;
    }
    if (n.is_empty()) || (n.len() > i32::max_value() as usize) {
        return sgx_status_t::SGX_ERROR_INVALID_PARAMETER;
    }
    if (e.is_empty()) || (e.len() > i32::max_value() as usize) {
        return sgx_status_t::SGX_ERROR_INVALID_PARAMETER;
    }

    unsafe {
        sgx_create_rsa_pub1_key(
            mod_size,
            exp_size,
            n.as_ptr(),
            e.as_ptr(),
            new_pub_key as *mut sgx_rsa_key_t,
        )
    }
}

fn rsgx_free_rsa_key(
    rsa_key: sgx_rsa_key_t,
    key_type: sgx_rsa_key_type_t,
    mod_size: i32,
    exp_size: i32,
) -> sgx_status_t {
    if (mod_size <= 0) || (exp_size <= 0) {
        return sgx_status_t::SGX_ERROR_INVALID_PARAMETER;
    }

    unsafe { sgx_free_rsa_key(rsa_key, key_type, mod_size, exp_size) }
}

fn rsgx_rsa_priv_decrypt_sha256(
    rsa_key: sgx_rsa_key_t,
    out_data: &mut [u8],
    out_len: &mut usize,
    in_data: &[u8],
) -> sgx_status_t {
    if in_data.is_empty() {
        return sgx_status_t::SGX_ERROR_INVALID_PARAMETER;
    }
    if *out_len != 0 && out_data.len() != *out_len {
        return sgx_status_t::SGX_ERROR_INVALID_PARAMETER;
    }

    unsafe {
        let p_out_data: *mut u8 = if *out_len != 0 {
            out_data.as_mut_ptr()
        } else {
            ptr::null_mut()
        };
        sgx_rsa_priv_decrypt_sha256(
            rsa_key,
            p_out_data,
            out_len as *mut usize,
            in_data.as_ptr(),
            in_data.len(),
        )
    }
}

fn rsgx_rsa_pub_encrypt_sha256(
    rsa_key: sgx_rsa_key_t,
    out_data: &mut [u8],
    out_len: &mut usize,
    in_data: &[u8],
) -> sgx_status_t {
    if in_data.is_empty() {
        return sgx_status_t::SGX_ERROR_INVALID_PARAMETER;
    }
    if *out_len != 0 && out_data.len() != *out_len {
        return sgx_status_t::SGX_ERROR_INVALID_PARAMETER;
    }

    unsafe {
        let p_out_data: *mut u8 = if *out_len != 0 {
            out_data.as_mut_ptr()
        }
else { ptr::null_mut() }; sgx_rsa_pub_encrypt_sha256( rsa_key, p_out_data, out_len as *mut usize, in_data.as_ptr(), in_data.len(), ) } } pub struct SgxRsaPrivKey { key: RefCell<sgx_rsa_key_t>, mod_size: Cell<i32>, exp_size: Cell<i32>, createflag: Cell<bool>, } impl SgxRsaPrivKey { pub fn new() -> SgxRsaPrivKey { SgxRsaPrivKey { key: RefCell::new(ptr::null_mut() as sgx_rsa_key_t), mod_size: Cell::new(0), exp_size: Cell::new(0), createflag: Cell::new(false), } } #[inline] pub fn create( &self, mod_size: i32, exp_size: i32, e: &[u8], p: &[u8], q: &[u8], dmp1: &[u8], dmq1: &[u8], iqmp: &[u8], ) -> SgxError { self.create2(mod_size, exp_size, e, p, q, dmp1, dmq1, iqmp) } pub fn create2( &self, mod_size: i32, exp_size: i32, e: &[u8], p: &[u8], q: &[u8], dmp1: &[u8], dmq1: &[u8], iqmp: &[u8], ) -> SgxError { if self.createflag.get() { return Ok(()); } let ret = rsgx_create_rsa_priv2_key( mod_size, exp_size, e, p, q, dmp1, dmq1, iqmp, self.key.borrow_mut().deref_mut(), ); match ret { sgx_status_t::SGX_SUCCESS => { self.mod_size.set(mod_size); self.exp_size.set(exp_size); self.createflag.set(true); Ok(()) } _ => Err(ret), } } pub fn create1( &self, mod_size: i32, exp_size: i32, priv_exp_size: i32, n: &[u8], e: &[u8], d: &[u8], ) -> SgxError { if self.createflag.get() { return Ok(()); } let ret = rsgx_create_rsa_priv1_key( mod_size, exp_size, priv_exp_size, n, e, d, self.key.borrow_mut().deref_mut(), ); match ret { sgx_status_t::SGX_SUCCESS => { self.mod_size.set(mod_size); self.exp_size.set(exp_size); self.createflag.set(true); Ok(()) } _ => Err(ret), } } pub fn decrypt_sha256( &self, out_data: &mut [u8], out_len: &mut usize, in_data: &[u8], ) -> SgxError { if !self.createflag.get() { return Err(sgx_status_t::SGX_ERROR_INVALID_STATE); } let ret = rsgx_rsa_priv_decrypt_sha256( *self.key.borrow(), out_data, out_len, in_data, ); match ret { sgx_status_t::SGX_SUCCESS => Ok(()), _ => Err(ret), } } pub fn free(&self) -> SgxError { if !self.createflag.get() { return Ok(()); } let ret = { let key = *self.key.borrow(); if key.is_null() { sgx_status_t::SGX_SUCCESS } else { rsgx_free_rsa_key( key, sgx_rsa_key_type_t::SGX_RSA_PRIVATE_KEY, self.mod_size.get(), self.exp_size.get(), ) } }; match ret { sgx_status_t::SGX_SUCCESS => { self.createflag.set(false); *self.key.borrow_mut() = ptr::null_mut(); Ok(()) } _ => Err(ret), } } } impl Default for SgxRsaPrivKey { fn default() -> Self { Self::new() } } impl Drop for SgxRsaPrivKey { fn drop(&mut self) { let _ = self.free(); } } pub struct SgxRsaPubKey { key: RefCell<sgx_rsa_key_t>, mod_size: Cell<i32>, exp_size: Cell<i32>, createflag: Cell<bool>, } impl SgxRsaPubKey { pub fn new() -> SgxRsaPubKey { SgxRsaPubKey { key: RefCell::new(ptr::null_mut() as sgx_rsa_key_t), mod_size: Cell::new(0), exp_size: Cell::new(0), createflag: Cell::new(false), } } pub fn create(
mod_size: i32, exp_size: i32, n: &[u8], e: &[u8], ) -> SgxError { if self.createflag.get() { return Ok(()); } let ret = rsgx_create_rsa_pub1_key( mod_size, exp_size, n, e, self.key.borrow_mut().deref_mut(), ); match ret { sgx_status_t::SGX_SUCCESS => { self.mod_size.set(mod_size); self.exp_size.set(exp_size); self.createflag.set(true); Ok(()) } _ => Err(ret), } } pub fn encrypt_sha256( &self, out_data: &mut [u8], out_len: &mut usize, in_data: &[u8], ) -> SgxError { if !self.createflag.get() { return Err(sgx_status_t::SGX_ERROR_INVALID_STATE); } let ret = rsgx_rsa_pub_encrypt_sha256( *self.key.borrow(), out_data, out_len, in_data, ); match ret { sgx_status_t::SGX_SUCCESS => Ok(()), _ => Err(ret), } } pub fn free(&self) -> SgxError { if !self.createflag.get() { return Ok(()); } let ret = { let key = *self.key.borrow(); if key.is_null() { sgx_status_t::SGX_SUCCESS } else { rsgx_free_rsa_key( key, sgx_rsa_key_type_t::SGX_RSA_PUBLIC_KEY, self.mod_size.get(), self.exp_size.get(), ) } }; match ret { sgx_status_t::SGX_SUCCESS => { self.createflag.set(false); *self.key.borrow_mut() = ptr::null_mut(); Ok(()) } _ => Err(ret), } } } impl Default for SgxRsaPubKey { fn default() -> Self { Self::new() } } impl Drop for SgxRsaPubKey { fn drop(&mut self) { let _ = self.free(); } } /// /// rsgx_calculate_ecdsa_priv_key generates an ECDSA private key based on an input random seed. /// /// # Description /// /// This function generates an ECDSA private key based on an input random seed. /// /// # Parameters /// /// **hash_drg** /// /// Pointer to the input random seed. /// /// **sgx_nistp256_r_m1** /// /// Pointer to the buffer for n-1 where n is order of the ECC group used. /// /// **out_key** /// /// Pointer to the generated ECDSA private key. /// /// # Requirements /// /// Library: libsgx_tcrypto.a /// /// # Errors /// /// **SGX_ERROR_INVALID_PARAMETER** /// /// Some of the pointers are NULL, or the input size is 0. /// /// **SGX_ERROR_UNEXPECTED** /// /// Unexpected error occurred during the ECDSA private key generation. 
/// pub fn rsgx_calculate_ecdsa_priv_key( hash_drg: &[u8], sgx_nistp256_r_m1: &[u8], out_key: &mut [u8], ) -> SgxError { if (hash_drg.is_empty()) || (hash_drg.len() > i32::max_value() as usize) { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } if (sgx_nistp256_r_m1.is_empty()) || (sgx_nistp256_r_m1.len() > i32::max_value() as usize) { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } if (out_key.is_empty()) || (out_key.len() > i32::max_value() as usize) { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } let ret = unsafe { sgx_calculate_ecdsa_priv_key( hash_drg.as_ptr(), hash_drg.len() as i32, sgx_nistp256_r_m1.as_ptr(), sgx_nistp256_r_m1.len() as i32, out_key.as_mut_ptr(), out_key.len() as i32, ) }; match ret { sgx_status_t::SGX_SUCCESS => Ok(()), _ => Err(ret), } } pub fn rsgx_ecc256_calculate_pub_from_priv( priv_key: &sgx_ec256_private_t, pub_key: &mut sgx_ec256_public_t, ) -> SgxError { let ret = unsafe { sgx_ecc256_calculate_pub_from_priv( priv_key as *const sgx_ec256_private_t, pub_key as *mut sgx_ec256_public_t, ) }; match ret { sgx_status_t::SGX_SUCCESS => Ok(()), _ => Err(ret), } } pub fn rsgx_ecc256_priv_key(hash_drg: &[u8], sgx_nistp256_r_m1: &[u8]) -> SgxResult<sgx_ec256_private_t> { if (hash_drg.is_empty()) || (hash_drg.len() > i32::max_value() as usize) { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } if (sgx_nistp256_r_m1.is_empty()) || (sgx_nistp256_r_m1.len() > i32::max_value() as usize) { return Err(sgx_status_t::SGX_ERROR_INVALID_PARAMETER); } let mut priv_key = sgx_ec256_private_t::default(); let ret = unsafe { sgx_calculate_ecdsa_priv_key( hash_drg.as_ptr(), hash_drg.len() as i32, sgx_nistp256_r_m1.as_ptr(), sgx_nistp256_r_m1.len() as i32, &mut priv_key as *mut sgx_ec256_private_t as *mut u8, mem::size_of::<sgx_ec256_private_t>() as i32, ) }; match ret { sgx_status_t::SGX_SUCCESS => Ok(priv_key), _ => Err(ret), } } pub fn rsgx_ecc256_pub_from_priv(priv_key: &sgx_ec256_private_t) -> SgxResult<sgx_ec256_public_t> { let mut pub_key = sgx_ec256_public_t::default(); let ret = unsafe { sgx_ecc256_calculate_pub_from_priv( priv_key as *const sgx_ec256_private_t, &mut pub_key as *mut sgx_ec256_public_t, ) }; match ret { sgx_status_t::SGX_SUCCESS => Ok(pub_key), _ => Err(ret), } } fn rsgx_aes_gcm128_enc_init( key: &sgx_aes_gcm_128bit_key_t, iv: &[u8], aad: &[u8], aes_gcm_state: &mut sgx_aes_state_handle_t, ) -> sgx_status_t { let iv_len = iv.len(); if iv_len != SGX_AESGCM_IV_SIZE { return sgx_status_t::SGX_ERROR_INVALID_PARAMETER; } let aad_len = aad.len(); if aad_len > u32::max_value() as usize { return sgx_status_t::SGX_ERROR_INVALID_PARAMETER; } unsafe { let p_aad = if !aad.is_empty() { aad.as_ptr() } else { ptr::null() }; sgx_aes_gcm128_enc_init( key as *const sgx_aes_gcm_128bit_key_t as *const u8, iv.as_ptr(), iv_len as u32, p_aad, aad_len as u32, aes_gcm_state as *mut sgx_aes_state_handle_t, ) } } fn rsgx_aes_gcm128_enc_update( src: &[u8], dst: &mut [u8], aes_gcm_state: sgx_aes_state_handle_t, ) -> sgx_status_t { let src_len = src.len(); if src_len > u32::max_value() as usize { return sgx_status_t::SGX_ERROR_INVALID_PARAMETER; } if src_len == 0 { return sgx_status_t::SGX_ERROR_INVALID_PARAMETER; } let dst_len = dst.len(); if dst_len > u32::max_value() as usize { return sgx_status_t::SGX_ERROR_INVALID_PARAMETER; } if dst_len == 0 || dst_len < src_len { return sgx_status_t::SGX_ERROR_INVALID_PARAMETER; } unsafe { sgx_aes_gcm128_enc_update( src.as_ptr(), src_len as u32, dst.as_mut_ptr(), aes_gcm_state, ) } } fn rsgx_aes_gcm128_enc_get_mac( 
    mac: &mut sgx_aes_gcm_128bit_tag_t,
    aes_gcm_state: sgx_aes_state_handle_t,
) -> sgx_status_t {
    unsafe { sgx_aes_gcm128_enc_get_mac(mac as *mut sgx_aes_gcm_128bit_tag_t as *mut u8, aes_gcm_state) }
}

fn rsgx_aes_gcm_close(aes_gcm_state: sgx_aes_state_handle_t) -> sgx_status_t {
    unsafe { sgx_aes_gcm_close(aes_gcm_state) }
}

pub struct SgxAesHandle {
    handle: RefCell<sgx_aes_state_handle_t>,
    initflag: Cell<bool>,
}

impl SgxAesHandle {
    pub fn new() -> SgxAesHandle {
        SgxAesHandle {
            handle: RefCell::new(ptr::null_mut() as sgx_aes_state_handle_t),
            initflag: Cell::new(false),
        }
    }

    pub fn init(&self, key: &sgx_aes_gcm_128bit_key_t, iv: &[u8], aad: &[u8]) -> SgxError {
        if self.initflag.get() {
            return Ok(());
        }

        let ret = rsgx_aes_gcm128_enc_init(key, iv, aad, self.handle.borrow_mut().deref_mut());
        match ret {
            sgx_status_t::SGX_SUCCESS => {
                // Mark the handle as initialized on success; without this, update,
                // get_mac and close would always bail out with SGX_ERROR_INVALID_STATE.
                self.initflag.set(true);
                Ok(())
            }
            _ => Err(ret),
        }
    }

    pub fn update(&self, src: &[u8], dst: &mut [u8]) -> SgxError {
        if !self.initflag.get() {
            return Err(sgx_status_t::SGX_ERROR_INVALID_STATE);
        }

        let ret = rsgx_aes_gcm128_enc_update(src, dst, *self.handle.borrow());
        match ret {
            sgx_status_t::SGX_SUCCESS => Ok(()),
            _ => Err(ret),
        }
    }

    pub fn get_mac(&self) -> SgxResult<sgx_aes_gcm_128bit_tag_t> {
        if !self.initflag.get() {
            return Err(sgx_status_t::SGX_ERROR_INVALID_STATE);
        }

        let mut mac = sgx_aes_gcm_128bit_tag_t::default();
        let ret = rsgx_aes_gcm128_enc_get_mac(&mut mac, *self.handle.borrow());
        match ret {
            sgx_status_t::SGX_SUCCESS => Ok(mac),
            _ => Err(ret),
        }
    }

    pub fn close(&self) -> SgxError {
        if !self.initflag.get() {
            return Ok(());
        }

        let ret = {
            let handle = *self.handle.borrow();
            if handle.is_null() {
                sgx_status_t::SGX_SUCCESS
            } else {
                rsgx_aes_gcm_close(handle)
            }
        };
        match ret {
            sgx_status_t::SGX_SUCCESS => {
                self.initflag.set(false);
                *self.handle.borrow_mut() = ptr::null_mut();
                Ok(())
            }
            _ => Err(ret),
        }
    }
}

impl Default for SgxAesHandle {
    fn default() -> Self {
        Self::new()
    }
}

impl Drop for SgxAesHandle {
    fn drop(&mut self) {
        let _ = self.close();
    }
}
&self,
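// A hypothetical usage sketch (not part of the SDK sources above): it exercises the
// SgxEccHandle wrapper to run the Alice-and-Bob key agreement described in the
// compute_shared_dhkey docs, then signs and verifies a message with ECDSA. It assumes
// enclave code with the sgx_types definitions in scope; the function name
// demo_ecdh_and_sign is illustrative only, not an SDK API.
fn demo_ecdh_and_sign() -> SgxError {
    let ecc = SgxEccHandle::new();
    ecc.open()?;

    // Steps 1-4 of the scheme: each party creates a key pair and exchanges the public halves.
    let (alice_priv, alice_pub) = ecc.create_key_pair()?;
    let (bob_priv, bob_pub) = ecc.create_key_pair()?;

    // Sanity-check a peer's point before using it.
    assert!(ecc.check_point(&bob_pub)?);

    // Steps 5-6: both sides derive the same shared secret.
    let alice_shared = ecc.compute_shared_dhkey(&alice_priv, &bob_pub)?;
    let bob_shared = ecc.compute_shared_dhkey(&bob_priv, &alice_pub)?;
    assert_eq!(alice_shared.s, bob_shared.s);

    // ECDSA: Alice signs a small message; anyone holding her public key can verify it.
    let msg: [u8; 4] = [1, 2, 3, 4];
    let signature = ecc.ecdsa_sign_slice(&msg, &alice_priv)?;
    assert!(ecc.ecdsa_verify_slice(&msg, &alice_pub, &signature)?);

    ecc.close()
}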
utils.rs
pub fn rmse(x: &Vec<f32>, y: &Vec<f32>) -> f32 { // for semantics vs targets if x.len() != y.len() { // use assert!() ? panic!("cannot rmse vectors of different length!"); } let n = x.len() as f32; let mapper = x.into_iter() .zip(y.into_iter()) .map(|(x, y)| ((x - y) as f32).powi(2)); let se: f32 = mapper.sum(); // or .reduce(|| 0, |a, b| a + b) (se / n).sqrt() } pub fn add(x: Vec<f32>, y: Vec<f32>) -> Vec<f32> { if x.len() != y.len() { panic!("cannot sum element-wise vectors of different length!"); } else { x.into_iter() .zip(y.into_iter()) .map(|(x, y)| x + y) .collect() } } pub fn subtract(x: Vec<f32>, y: Vec<f32>) -> Vec<f32> { if x.len() != y.len()
else {
    x.into_iter()
        .zip(y.into_iter())
        .map(|(x, y)| x - y)
        .collect()
    }
}

pub fn multiply(x: Vec<f32>, y: Vec<f32>) -> Vec<f32> {
    if x.len() != y.len() {
        panic!("cannot multiply element-wise vectors of different length!");
    } else {
        x.into_iter() // or rayon::prelude::par_iter()
            .zip(y.into_iter())
            .map(|(x, y)| x * y)
            .collect()
    }
}

/*
fn dot_product(vec1: &[f32], vec2: &[f32]) -> f32 {
    vec1.par_iter()
        .zip(vec2)
        .map(|(e1, e2)| e1 * e2) // zip yields tuples, so destructure them
        .sum() // a dot product reduces to a scalar, so sum instead of collect
}
*/

pub fn cosine(x: Vec<f32>) -> Vec<f32> {
    x.into_iter().map(|x| x.cos()).collect()
}

/// Element-wise logistic function. Mainly for use in Geometric Semantic Genetic Programming.
pub fn logistic_function(x: Vec<f32>) -> Vec<f32> {
    let e = |y: f32| -> f32 { 1.0 / (1.0 + (-y).exp()) };
    x.into_iter().map(e).collect()
}

pub fn divide(x: Vec<f32>, y: Vec<f32>) -> Vec<f32> {
    if x.len() != y.len() {
        panic!("cannot divide element-wise vectors of different length!");
    }

    let lower_limit = 0.00001; // this can totally influence semantics!
    // numerator and denominator; a near-zero denominator leaves the numerator unchanged
    let protected_division = |(n, d): (f32, f32)| -> f32 {
        if d.abs() > lower_limit {
            n / d
        } else {
            n
        }
    };

    x.into_iter()
        .zip(y.into_iter())
        .map(protected_division)
        .collect()
}
{ panic!("cannot subtract element-wise vectors of different length!"); }
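// A small hypothetical driver (not part of utils.rs) showing the helpers above on
// concrete data; the name demo_vector_ops and the sample values are illustrative only.
fn demo_vector_ops() {
    let semantics = vec![1.0_f32, 2.0, 3.0];
    let targets = vec![1.5_f32, 2.0, 2.5];

    // rmse borrows its arguments, so the vectors stay usable afterwards.
    let error = rmse(&semantics, &targets);
    assert!((error - (0.5_f32 / 3.0).sqrt()).abs() < 1e-6); // sqrt((0.25 + 0.0 + 0.25) / 3)

    // add/subtract/multiply/divide take ownership and return new vectors.
    let summed = add(semantics.clone(), targets.clone());
    assert_eq!(summed, vec![2.5, 4.0, 5.5]);

    // Protected division: a denominator below the 0.00001 limit leaves the
    // numerator unchanged instead of producing an infinity.
    let divided = divide(vec![4.0, 9.0], vec![2.0, 0.0]);
    assert_eq!(divided, vec![2.0, 9.0]);
}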
_univariate_selection.py
"""Univariate features selection.""" # Authors: V. Michel, B. Thirion, G. Varoquaux, A. Gramfort, E. Duchesnay. # L. Buitinck, A. Joly # License: BSD 3 clause import numpy as np import warnings from scipy import special, stats from scipy.sparse import issparse from ..base import BaseEstimator from ..preprocessing import LabelBinarizer from ..utils import (as_float_array, check_array, check_X_y, safe_sqr, safe_mask) from ..utils.extmath import safe_sparse_dot, row_norms from ..utils.validation import check_is_fitted from ._base import SelectorMixin def _clean_nans(scores): """ Fixes Issue #1240: NaNs can't be properly compared, so change them to the smallest value of scores's dtype. -inf seems to be unreliable. """ # XXX where should this function be called? fit? scoring functions # themselves? scores = as_float_array(scores, copy=True) scores[np.isnan(scores)] = np.finfo(scores.dtype).min return scores ###################################################################### # Scoring functions # The following function is a rewriting of scipy.stats.f_oneway # Contrary to the scipy.stats.f_oneway implementation it does not # copy the data while keeping the inputs unchanged. def f_oneway(*args): """Performs a 1-way ANOVA. The one-way ANOVA tests the null hypothesis that 2 or more groups have the same population mean. The test is applied to samples from two or more groups, possibly with differing sizes. Read more in the :ref:`User Guide <univariate_feature_selection>`. Parameters ---------- *args : {array-like, sparse matrix} sample1, sample2... The sample measurements should be given as arguments. Returns ------- f_statistic : float The computed F-value of the test. p_value : float The associated p-value from the F-distribution. Notes ----- The ANOVA test has important assumptions that must be satisfied in order for the associated p-value to be valid. 1. The samples are independent 2. Each sample is from a normally distributed population 3. The population standard deviations of the groups are all equal. This property is known as homoscedasticity. If these assumptions are not true for a given set of data, it may still be possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`_) although with some loss of power. The algorithm is from Heiman[2], pp.394-7. See ``scipy.stats.f_oneway`` that should give the same results while being less efficient. References ---------- .. [1] Lowry, Richard. "Concepts and Applications of Inferential Statistics". Chapter 14. http://faculty.vassar.edu/lowry/ch14pt1.html .. [2] Heiman, G.W. Research Methods in Statistics. 2002. """ n_classes = len(args) args = [as_float_array(a) for a in args] n_samples_per_class = np.array([a.shape[0] for a in args]) n_samples = np.sum(n_samples_per_class) ss_alldata = sum(safe_sqr(a).sum(axis=0) for a in args) sums_args = [np.asarray(a.sum(axis=0)) for a in args] square_of_sums_alldata = sum(sums_args) ** 2 square_of_sums_args = [s ** 2 for s in sums_args] sstot = ss_alldata - square_of_sums_alldata / float(n_samples) ssbn = 0. for k, _ in enumerate(args): ssbn += square_of_sums_args[k] / n_samples_per_class[k] ssbn -= square_of_sums_alldata / float(n_samples) sswn = sstot - ssbn dfbn = n_classes - 1 dfwn = n_samples - n_classes msb = ssbn / float(dfbn) msw = sswn / float(dfwn) constant_features_idx = np.where(msw == 0.)[0] if (np.nonzero(msb)[0].size != msb.size and constant_features_idx.size): warnings.warn("Features %s are constant." 
% constant_features_idx, UserWarning) f = msb / msw # flatten matrix to vector in sparse case f = np.asarray(f).ravel() prob = special.fdtrc(dfbn, dfwn, f) return f, prob def f_classif(X, y): """Compute the ANOVA F-value for the provided sample. Read more in the :ref:`User Guide <univariate_feature_selection>`. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The set of regressors that will be tested sequentially. y : ndarray of shape (n_samples,) The target vector. Returns ------- f_statistic : ndarray of shape (n_features,) F-statistic for each feature. p_values : ndarray of shape (n_features,) P-values associated with the F-statistic. See Also -------- chi2 : Chi-squared stats of non-negative features for classification tasks. f_regression : F-value between label/feature for regression tasks. """ X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo']) args = [X[safe_mask(X, y == k)] for k in np.unique(y)] return f_oneway(*args) def _chisquare(f_obs, f_exp): """Fast replacement for scipy.stats.chisquare. Version from https://github.com/scipy/scipy/pull/2525 with additional optimizations. """ f_obs = np.asarray(f_obs, dtype=np.float64) k = len(f_obs) # Reuse f_obs for chi-squared statistics chisq = f_obs chisq -= f_exp chisq **= 2 with np.errstate(invalid="ignore"): chisq /= f_exp chisq = chisq.sum(axis=0) return chisq, special.chdtrc(k - 1, chisq) def chi2(X, y):
This score can be used to select the n_features features with the highest values for the test chi-squared statistic from X, which must contain only non-negative features such as booleans or frequencies (e.g., term counts in document classification), relative to the classes. Recall that the chi-square test measures dependence between stochastic variables, so using this function "weeds out" the features that are the most likely to be independent of class and therefore irrelevant for classification. Read more in the :ref:`User Guide <univariate_feature_selection>`. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Sample vectors. y : array-like of shape (n_samples,) Target vector (class labels). Returns ------- chi2 : ndarray of shape (n_features,) Chi2 statistics for each feature. p_values : ndarray of shape (n_features,) P-values for each feature. Notes ----- Complexity of this algorithm is O(n_classes * n_features). See Also -------- f_classif : ANOVA F-value between label/feature for classification tasks. f_regression : F-value between label/feature for regression tasks. """ # XXX: we might want to do some of the following in logspace instead for # numerical stability. X = check_array(X, accept_sparse='csr') if np.any((X.data if issparse(X) else X) < 0): raise ValueError("Input X must be non-negative.") Y = LabelBinarizer().fit_transform(y) if Y.shape[1] == 1: Y = np.append(1 - Y, Y, axis=1) observed = safe_sparse_dot(Y.T, X) # n_classes * n_features feature_count = X.sum(axis=0).reshape(1, -1) class_prob = Y.mean(axis=0).reshape(1, -1) expected = np.dot(class_prob.T, feature_count) return _chisquare(observed, expected) def r_regression(X, y, *, center=True): """Compute Pearson's r for each features and the target. Pearson's r is also known as the Pearson correlation coefficient. .. versionadded:: 1.0 Linear model for testing the individual effect of each of many regressors. This is a scoring function to be used in a feature selection procedure, not a free standing feature selection procedure. The cross correlation between each regressor and the target is computed as ((X[:, i] - mean(X[:, i])) * (y - mean_y)) / (std(X[:, i]) * std(y)). For more on usage see the :ref:`User Guide <univariate_feature_selection>`. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data matrix. y : array-like of shape (n_samples,) The target vector. center : bool, default=True Whether or not to center the data matrix `X` and the target vector `y`. By default, `X` and `y` will be centered. Returns ------- correlation_coefficient : ndarray of shape (n_features,) Pearson's R correlation coefficients of features. See Also -------- f_regression: Univariate linear regression tests returning f-statistic and p-values mutual_info_regression: Mutual information for a continuous target. f_classif: ANOVA F-value between label/feature for classification tasks. chi2: Chi-squared stats of non-negative features for classification tasks. 
""" X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64) n_samples = X.shape[0] # Compute centered values # Note that E[(x - mean(x))*(y - mean(y))] = E[x*(y - mean(y))], so we # need not center X if center: y = y - np.mean(y) if issparse(X): X_means = X.mean(axis=0).getA1() else: X_means = X.mean(axis=0) # Compute the scaled standard deviations via moments X_norms = np.sqrt(row_norms(X.T, squared=True) - n_samples * X_means ** 2) else: X_norms = row_norms(X.T) correlation_coefficient = safe_sparse_dot(y, X) correlation_coefficient /= X_norms correlation_coefficient /= np.linalg.norm(y) return correlation_coefficient def f_regression(X, y, *, center=True): """Univariate linear regression tests returning F-statistic and p-values. Quick linear model for testing the effect of a single regressor, sequentially for many regressors. This is done in 2 steps: 1. The cross correlation between each regressor and the target is computed, that is, ((X[:, i] - mean(X[:, i])) * (y - mean_y)) / (std(X[:, i]) * std(y)) using r_regression function. 2. It is converted to an F score and then to a p-value. :func:`f_regression` is derived from :func:`r_regression` and will rank features in the same order if all the features are positively correlated with the target. Note however that contrary to :func:`f_regression`, :func:`r_regression` values lie in [-1, 1] and can thus be negative. :func:`f_regression` is therefore recommended as a feature selection criterion to identify potentially predictive feature for a downstream classifier, irrespective of the sign of the association with the target variable. Furthermore :func:`f_regression` returns p-values while :func:`r_regression` does not. Read more in the :ref:`User Guide <univariate_feature_selection>`. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data matrix. y : array-like of shape (n_samples,) The target vector. center : bool, default=True Whether or not to center the data matrix `X` and the target vector `y`. By default, `X` and `y` will be centered. Returns ------- f_statistic : ndarray of shape (n_features,) F-statistic for each feature. p_values : ndarray of shape (n_features,) P-values associated with the F-statistic. See Also -------- r_regression: Pearson's R between label/feature for regression tasks. f_classif: ANOVA F-value between label/feature for classification tasks. chi2: Chi-squared stats of non-negative features for classification tasks. SelectKBest: Select features based on the k highest scores. SelectFpr: Select features based on a false positive rate test. SelectFdr: Select features based on an estimated false discovery rate. SelectFwe: Select features based on family-wise error rate. SelectPercentile: Select features based on percentile of the highest scores. """ correlation_coefficient = r_regression(X, y, center=center) deg_of_freedom = y.size - (2 if center else 1) corr_coef_squared = correlation_coefficient ** 2 f_statistic = corr_coef_squared / (1 - corr_coef_squared) * deg_of_freedom p_values = stats.f.sf(f_statistic, 1, deg_of_freedom) return f_statistic, p_values ###################################################################### # Base classes class _BaseFilter(SelectorMixin, BaseEstimator): """Initialize the univariate feature selection. Parameters ---------- score_func : callable Function taking two arrays X and y, and returning a pair of arrays (scores, pvalues) or a single array with scores. 
""" def __init__(self, score_func): self.score_func = score_func def fit(self, X, y): """Run score function on (X, y) and get the appropriate features. Parameters ---------- X : array-like of shape (n_samples, n_features) The training input samples. y : array-like of shape (n_samples,) The target values (class labels in classification, real numbers in regression). Returns ------- self : object """ X, y = self._validate_data(X, y, accept_sparse=['csr', 'csc'], multi_output=True) if not callable(self.score_func): raise TypeError("The score function should be a callable, %s (%s) " "was passed." % (self.score_func, type(self.score_func))) self._check_params(X, y) score_func_ret = self.score_func(X, y) if isinstance(score_func_ret, (list, tuple)): self.scores_, self.pvalues_ = score_func_ret self.pvalues_ = np.asarray(self.pvalues_) else: self.scores_ = score_func_ret self.pvalues_ = None self.scores_ = np.asarray(self.scores_) return self def _check_params(self, X, y): pass def _more_tags(self): return {'requires_y': True} ###################################################################### # Specific filters ###################################################################### class SelectPercentile(_BaseFilter): """Select features according to a percentile of the highest scores. Read more in the :ref:`User Guide <univariate_feature_selection>`. Parameters ---------- score_func : callable, default=f_classif Function taking two arrays X and y, and returning a pair of arrays (scores, pvalues) or a single array with scores. Default is f_classif (see below "See Also"). The default function only works with classification tasks. .. versionadded:: 0.18 percentile : int, default=10 Percent of features to keep. Attributes ---------- scores_ : array-like of shape (n_features,) Scores of features. pvalues_ : array-like of shape (n_features,) p-values of feature scores, None if `score_func` returned only scores. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 Examples -------- >>> from sklearn.datasets import load_digits >>> from sklearn.feature_selection import SelectPercentile, chi2 >>> X, y = load_digits(return_X_y=True) >>> X.shape (1797, 64) >>> X_new = SelectPercentile(chi2, percentile=10).fit_transform(X, y) >>> X_new.shape (1797, 7) Notes ----- Ties between features with equal scores will be broken in an unspecified way. See Also -------- f_classif : ANOVA F-value between label/feature for classification tasks. mutual_info_classif : Mutual information for a discrete target. chi2 : Chi-squared stats of non-negative features for classification tasks. f_regression : F-value between label/feature for regression tasks. mutual_info_regression : Mutual information for a continuous target. SelectKBest : Select features based on the k highest scores. SelectFpr : Select features based on a false positive rate test. SelectFdr : Select features based on an estimated false discovery rate. SelectFwe : Select features based on family-wise error rate. GenericUnivariateSelect : Univariate feature selector with configurable mode. 
""" def __init__(self, score_func=f_classif, *, percentile=10): super().__init__(score_func=score_func) self.percentile = percentile def _check_params(self, X, y): if not 0 <= self.percentile <= 100: raise ValueError("percentile should be >=0, <=100; got %r" % self.percentile) def _get_support_mask(self): check_is_fitted(self) # Cater for NaNs if self.percentile == 100: return np.ones(len(self.scores_), dtype=bool) elif self.percentile == 0: return np.zeros(len(self.scores_), dtype=bool) scores = _clean_nans(self.scores_) threshold = np.percentile(scores, 100 - self.percentile) mask = scores > threshold ties = np.where(scores == threshold)[0] if len(ties): max_feats = int(len(scores) * self.percentile / 100) kept_ties = ties[:max_feats - mask.sum()] mask[kept_ties] = True return mask class SelectKBest(_BaseFilter): """Select features according to the k highest scores. Read more in the :ref:`User Guide <univariate_feature_selection>`. Parameters ---------- score_func : callable, default=f_classif Function taking two arrays X and y, and returning a pair of arrays (scores, pvalues) or a single array with scores. Default is f_classif (see below "See Also"). The default function only works with classification tasks. .. versionadded:: 0.18 k : int or "all", default=10 Number of top features to select. The "all" option bypasses selection, for use in a parameter search. Attributes ---------- scores_ : array-like of shape (n_features,) Scores of features. pvalues_ : array-like of shape (n_features,) p-values of feature scores, None if `score_func` returned only scores. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 Examples -------- >>> from sklearn.datasets import load_digits >>> from sklearn.feature_selection import SelectKBest, chi2 >>> X, y = load_digits(return_X_y=True) >>> X.shape (1797, 64) >>> X_new = SelectKBest(chi2, k=20).fit_transform(X, y) >>> X_new.shape (1797, 20) Notes ----- Ties between features with equal scores will be broken in an unspecified way. See Also -------- f_classif: ANOVA F-value between label/feature for classification tasks. mutual_info_classif: Mutual information for a discrete target. chi2: Chi-squared stats of non-negative features for classification tasks. f_regression: F-value between label/feature for regression tasks. mutual_info_regression: Mutual information for a continuous target. SelectPercentile: Select features based on percentile of the highest scores. SelectFpr : Select features based on a false positive rate test. SelectFdr : Select features based on an estimated false discovery rate. SelectFwe : Select features based on family-wise error rate. GenericUnivariateSelect : Univariate feature selector with configurable mode. """ def __init__(self, score_func=f_classif, *, k=10): super().__init__(score_func=score_func) self.k = k def _check_params(self, X, y): if not (self.k == "all" or 0 <= self.k <= X.shape[1]): raise ValueError("k should be >=0, <= n_features = %d; got %r. " "Use k='all' to return all features." % (X.shape[1], self.k)) def _get_support_mask(self): check_is_fitted(self) if self.k == 'all': return np.ones(self.scores_.shape, dtype=bool) elif self.k == 0: return np.zeros(self.scores_.shape, dtype=bool) else: scores = _clean_nans(self.scores_) mask = np.zeros(scores.shape, dtype=bool) # Request a stable sort. Mergesort takes more memory (~40MB per # megafeature on x86-64). 
mask[np.argsort(scores, kind="mergesort")[-self.k:]] = 1 return mask class SelectFpr(_BaseFilter): """Filter: Select the pvalues below alpha based on a FPR test. FPR test stands for False Positive Rate test. It controls the total amount of false detections. Read more in the :ref:`User Guide <univariate_feature_selection>`. Parameters ---------- score_func : callable, default=f_classif Function taking two arrays X and y, and returning a pair of arrays (scores, pvalues). Default is f_classif (see below "See Also"). The default function only works with classification tasks. alpha : float, default=5e-2 The highest p-value for features to be kept. Attributes ---------- scores_ : array-like of shape (n_features,) Scores of features. pvalues_ : array-like of shape (n_features,) p-values of feature scores. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 Examples -------- >>> from sklearn.datasets import load_breast_cancer >>> from sklearn.feature_selection import SelectFpr, chi2 >>> X, y = load_breast_cancer(return_X_y=True) >>> X.shape (569, 30) >>> X_new = SelectFpr(chi2, alpha=0.01).fit_transform(X, y) >>> X_new.shape (569, 16) See Also -------- f_classif : ANOVA F-value between label/feature for classification tasks. chi2 : Chi-squared stats of non-negative features for classification tasks. mutual_info_classif: Mutual information for a discrete target. f_regression : F-value between label/feature for regression tasks. mutual_info_regression : Mutual information for a continuous target. SelectPercentile : Select features based on percentile of the highest scores. SelectKBest : Select features based on the k highest scores. SelectFdr : Select features based on an estimated false discovery rate. SelectFwe : Select features based on family-wise error rate. GenericUnivariateSelect : Univariate feature selector with configurable mode. """ def __init__(self, score_func=f_classif, *, alpha=5e-2): super().__init__(score_func=score_func) self.alpha = alpha def _get_support_mask(self): check_is_fitted(self) return self.pvalues_ < self.alpha class SelectFdr(_BaseFilter): """Filter: Select the p-values for an estimated false discovery rate This uses the Benjamini-Hochberg procedure. ``alpha`` is an upper bound on the expected false discovery rate. Read more in the :ref:`User Guide <univariate_feature_selection>`. Parameters ---------- score_func : callable, default=f_classif Function taking two arrays X and y, and returning a pair of arrays (scores, pvalues). Default is f_classif (see below "See Also"). The default function only works with classification tasks. alpha : float, default=5e-2 The highest uncorrected p-value for features to keep. Examples -------- >>> from sklearn.datasets import load_breast_cancer >>> from sklearn.feature_selection import SelectFdr, chi2 >>> X, y = load_breast_cancer(return_X_y=True) >>> X.shape (569, 30) >>> X_new = SelectFdr(chi2, alpha=0.01).fit_transform(X, y) >>> X_new.shape (569, 16) Attributes ---------- scores_ : array-like of shape (n_features,) Scores of features. pvalues_ : array-like of shape (n_features,) p-values of feature scores. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 References ---------- https://en.wikipedia.org/wiki/False_discovery_rate See Also -------- f_classif : ANOVA F-value between label/feature for classification tasks. mutual_info_classif : Mutual information for a discrete target. chi2 : Chi-squared stats of non-negative features for classification tasks. 
f_regression : F-value between label/feature for regression tasks.
    mutual_info_regression : Mutual information for a continuous target.
    SelectPercentile : Select features based on percentile of the highest
        scores.
    SelectKBest : Select features based on the k highest scores.
    SelectFpr : Select features based on a false positive rate test.
    SelectFwe : Select features based on family-wise error rate.
    GenericUnivariateSelect : Univariate feature selector with configurable
        mode.
    """

    def __init__(self, score_func=f_classif, *, alpha=5e-2):
        super().__init__(score_func=score_func)
        self.alpha = alpha

    def _get_support_mask(self):
        check_is_fitted(self)

        n_features = len(self.pvalues_)
        sv = np.sort(self.pvalues_)
        selected = sv[sv <= float(self.alpha) / n_features *
                      np.arange(1, n_features + 1)]
        if selected.size == 0:
            return np.zeros_like(self.pvalues_, dtype=bool)
        return self.pvalues_ <= selected.max()


class SelectFwe(_BaseFilter):
    """Filter: Select the p-values corresponding to Family-wise error rate.

    Read more in the :ref:`User Guide <univariate_feature_selection>`.

    Parameters
    ----------
    score_func : callable, default=f_classif
        Function taking two arrays X and y, and returning a pair of arrays
        (scores, pvalues).
        Default is f_classif (see below "See Also"). The default function only
        works with classification tasks.

    alpha : float, default=5e-2
        The highest uncorrected p-value for features to keep.

    Examples
    --------
    >>> from sklearn.datasets import load_breast_cancer
    >>> from sklearn.feature_selection import SelectFwe, chi2
    >>> X, y = load_breast_cancer(return_X_y=True)
    >>> X.shape
    (569, 30)
    >>> X_new = SelectFwe(chi2, alpha=0.01).fit_transform(X, y)
    >>> X_new.shape
    (569, 15)

    Attributes
    ----------
    scores_ : array-like of shape (n_features,)
        Scores of features.

    pvalues_ : array-like of shape (n_features,)
        p-values of feature scores.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    See Also
    --------
    f_classif : ANOVA F-value between label/feature for classification tasks.
    chi2 : Chi-squared stats of non-negative features for classification tasks.
    f_regression : F-value between label/feature for regression tasks.
    SelectPercentile : Select features based on percentile of the highest
        scores.
    SelectKBest : Select features based on the k highest scores.
    SelectFpr : Select features based on a false positive rate test.
    SelectFdr : Select features based on an estimated false discovery rate.
    GenericUnivariateSelect : Univariate feature selector with configurable
        mode.
    """

    def __init__(self, score_func=f_classif, *, alpha=5e-2):
        super().__init__(score_func=score_func)
        self.alpha = alpha

    def _get_support_mask(self):
        check_is_fitted(self)

        return (self.pvalues_ < self.alpha / len(self.pvalues_))


######################################################################
# Generic filter
######################################################################

# TODO this class should fit on either p-values or scores,
# depending on the mode.
class GenericUnivariateSelect(_BaseFilter):
    """Univariate feature selector with configurable strategy.

    Read more in the :ref:`User Guide <univariate_feature_selection>`.

    Parameters
    ----------
    score_func : callable, default=f_classif
        Function taking two arrays X and y, and returning a pair of arrays
        (scores, pvalues). For modes 'percentile' or 'k_best' it can return
        a single array scores.

    mode : {'percentile', 'k_best', 'fpr', 'fdr', 'fwe'}, default='percentile'
        Feature selection mode.
param : float or int depending on the feature selection mode, default=1e-5 Parameter of the corresponding mode. Attributes ---------- scores_ : array-like of shape (n_features,) Scores of features. pvalues_ : array-like of shape (n_features,) p-values of feature scores, None if `score_func` returned scores only. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 Examples -------- >>> from sklearn.datasets import load_breast_cancer >>> from sklearn.feature_selection import GenericUnivariateSelect, chi2 >>> X, y = load_breast_cancer(return_X_y=True) >>> X.shape (569, 30) >>> transformer = GenericUnivariateSelect(chi2, mode='k_best', param=20) >>> X_new = transformer.fit_transform(X, y) >>> X_new.shape (569, 20) See Also -------- f_classif : ANOVA F-value between label/feature for classification tasks. mutual_info_classif : Mutual information for a discrete target. chi2 : Chi-squared stats of non-negative features for classification tasks. f_regression : F-value between label/feature for regression tasks. mutual_info_regression : Mutual information for a continuous target. SelectPercentile : Select features based on percentile of the highest scores. SelectKBest : Select features based on the k highest scores. SelectFpr : Select features based on a false positive rate test. SelectFdr : Select features based on an estimated false discovery rate. SelectFwe : Select features based on family-wise error rate. """ _selection_modes: dict = {'percentile': SelectPercentile, 'k_best': SelectKBest, 'fpr': SelectFpr, 'fdr': SelectFdr, 'fwe': SelectFwe} def __init__(self, score_func=f_classif, *, mode='percentile', param=1e-5): super().__init__(score_func=score_func) self.mode = mode self.param = param def _make_selector(self): selector = self._selection_modes[self.mode](score_func=self.score_func) # Now perform some acrobatics to set the right named parameter in # the selector possible_params = selector._get_param_names() possible_params.remove('score_func') selector.set_params(**{possible_params[0]: self.param}) return selector def _check_params(self, X, y): if self.mode not in self._selection_modes: raise ValueError("The mode passed should be one of %s, %r," " (type %s) was passed." % (self._selection_modes.keys(), self.mode, type(self.mode))) self._make_selector()._check_params(X, y) def _get_support_mask(self): check_is_fitted(self) selector = self._make_selector() selector.pvalues_ = self.pvalues_ selector.scores_ = self.scores_ return selector._get_support_mask()
"""Compute chi-squared stats between each non-negative feature and class.
tabbed-line-chart-group.component.js
import { ChangeDetectorRef, Component, EventEmitter, Input, Output, ViewChildren } from '@angular/core'; import { LineChartComponent } from '../line-chart.component'; var TabbedLineChartGroupComponent = (function () { function TabbedLineChartGroupComponent(changeDetectorRef) { this.changeDetectorRef = changeDetectorRef; this.chartsPerRow = 3; this.onTabClick = new EventEmitter(); } TabbedLineChartGroupComponent.prototype.ngOnInit = function () { if (this.tabList) { this.selectedTab = MsftSme.first(this.tabList); this.getTabGroupDimensions(); // tooltip formatting functions this.tooltipFormatters = this.tooltipFormatters ? this.tooltipFormatters : []; for (var i = 0; i < this.groupData.length; i++) { if (!this.tooltipFormatters[i]) { // if no formatting funcion is supplied, then null ensures the data will not be formatted this.tooltipFormatters[i] = null; } } } else { throw new Error('tabList input is required to render tabbed line chart group'); } }; TabbedLineChartGroupComponent.prototype.getTabGroupDimensions = function () { // two and three charts per row use custom css classes if (this.chartsPerRow !== 2 && this.chartsPerRow !== 3) { // add one to leave space for margins between graphs var tabPercent = 100 / (this.chartsPerRow + 1); this.tabGroupWidth = '{0}%'.format(tabPercent); this.tabGroupMargin = '16px'; } }; /** * Set selected tab and emit this event */ TabbedLineChartGroupComponent.prototype.setTab = function (tab, index) { this.selectedTab = tab; this.onTabClick.emit(index); this.refresh(); }; /** * Refresh all of the charts when new data is added to lineChartData input */ TabbedLineChartGroupComponent.prototype.refresh = function () { this.changeDetectorRef.detectChanges(); if (this.lineChartComponents) { this.lineChartComponents.forEach(function (lineChart) { lineChart.refresh(); }); } }; /** * Get line chart data for selected tab */ TabbedLineChartGroupComponent.prototype.getLineChartData = function (tabChartData) { return tabChartData.get(this.selectedTab); }; /** * Check whether the LineChartData.isLoading for that tab is set to true or false */ TabbedLineChartGroupComponent.prototype.dataIsLoading = function (tabChartData) { return this.getLineChartData(tabChartData).isLoading; }; TabbedLineChartGroupComponent.prototype.ngOnDestroy = function () { this.changeDetectorRef.detach(); }; return TabbedLineChartGroupComponent; }()); export { TabbedLineChartGroupComponent }; TabbedLineChartGroupComponent.decorators = [ { type: Component, args: [{ selector: 'sme-tabbed-line-chart-group', template: "\n <ul class=\"nav nav-tabs tab-group\" [ngStyle]=\"{'margin-right': tabGroupMargin}\" role=\"tablist\" role=\"presentation\" *ngIf=\"tabList.length > 1\">\n <li *ngFor=\"let tab of tabList; let i = index\" [ngClass]=\"{'active': tab === selectedTab}\">\n <a role=\"tab\" data-toggle=\"tab\" (click)=\"setTab(tab, i)\">{{ tab }}</a>\n </li>\n </ul>\n\n <div class=\"dashboard\">\n <div class=\"chart-group\" [ngClass]=\"{'two-col-group': chartsPerRow === 2, 'three-col-group': chartsPerRow === 3}\" [ngStyle]=\"{'width': tabGroupWidth, 'margin-right': tabGroupMargin}\" *ngFor=\"let tabChartData of groupData; let i = index\">\n <sme-loading-wheel *ngIf=\"loadingWheels && dataIsLoading(tabChartData)\" [message]=\"loadingMessage\"></sme-loading-wheel>\n <sme-line-chart class=\"fixed-flex-size\" [tooltipFormatter]=\"tooltipFormatters[i]\" [lineChartData]=\"getLineChartData(tabChartData)\"></sme-line-chart>\n </div>\n </div>\n ", styles: ["\n .tab-btn {\n margin-right: 0px;\n background-color: 
white;\n min-width: 80px;\n }\n\n .tab-btn:hover,\n .tab-btn:focus,\n .tab-btn-active {\n background-color: #327cd4;\n color: white;\n }\n\n .tab-btn:hover {\n background-color: #E5F1FB;\n color: #000;\n }\n\n .tab-group {\n display: flex;\n justify-content: flex-end;\n }\n\n .dashboard {\n background: #fff;\n position: relative;\n padding: 0px;\n }\n\n .chart-group {\n float: left;\n position: relative;\n min-height: 1px;\n }\n\n .two-col-group {\n width: 534px;\n margin-right: 72px;\n }\n\n .three-col-group {\n width: 30%;\n margin-right: 16px;\n }\n "] },] },
{ type: ChangeDetectorRef, },
]; };
TabbedLineChartGroupComponent.propDecorators = {
    'lineChartComponents': [{ type: ViewChildren, args: [LineChartComponent,] },],
    'groupData': [{ type: Input },],
    'tabList': [{ type: Input },],
    'chartsPerRow': [{ type: Input },],
    'loadingWheels': [{ type: Input },],
    'loadingMessage': [{ type: Input },],
    'tooltipFormatters': [{ type: Input },],
    'onTabClick': [{ type: Output },],
};
]; /** @nocollapse */ TabbedLineChartGroupComponent.ctorParameters = function () { return [
add.go
package keys import ( "bufio" "bytes" "errors" "fmt" "sort" "github.com/cosmos/cosmos-sdk/client/flags" "github.com/cosmos/cosmos-sdk/client/input" "github.com/cosmos/cosmos-sdk/crypto/keys" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/spf13/cobra" "github.com/spf13/viper" "github.com/cosmos/go-bip39" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/multisig" "github.com/tendermint/tendermint/libs/cli" ) const ( flagInteractive = "interactive" flagRecover = "recover" flagNoBackup = "no-backup" flagDryRun = "dry-run" flagAccount = "account" flagIndex = "index" flagMultisig = "multisig" flagNoSort = "nosort" // DefaultKeyPass contains the default key password for genesis transactions DefaultKeyPass = "12345678" ) func addKeyCommand() *cobra.Command
/* input - bip39 mnemonic - bip39 passphrase - bip44 path - local encryption password output - armor encrypted private key (saved to file) */ func runAddCmd(cmd *cobra.Command, args []string) error { var kb keys.Keybase var err error var encryptPassword string inBuf := bufio.NewReader(cmd.InOrStdin()) name := args[0] interactive := viper.GetBool(flagInteractive) showMnemonic := !viper.GetBool(flagNoBackup) if viper.GetBool(flagDryRun) { // we throw this away, so don't enforce args, // we want to get a new random seed phrase quickly kb = keys.NewInMemory() encryptPassword = DefaultKeyPass } else { kb, err = NewKeyBaseFromHomeFlag() if err != nil { return err } _, err = kb.Get(name) if err == nil { // account exists, ask for user confirmation response, err2 := input.GetConfirmation(fmt.Sprintf("override the existing name %s", name), inBuf) if err2 != nil { return err2 } if !response { return errors.New("aborted") } } multisigKeys := viper.GetStringSlice(flagMultisig) if len(multisigKeys) != 0 { var pks []crypto.PubKey multisigThreshold := viper.GetInt(flagMultiSigThreshold) if err := validateMultisigThreshold(multisigThreshold, len(multisigKeys)); err != nil { return err } for _, keyname := range multisigKeys { k, err := kb.Get(keyname) if err != nil { return err } pks = append(pks, k.GetPubKey()) } // Handle --nosort if !viper.GetBool(flagNoSort) { sort.Slice(pks, func(i, j int) bool { return bytes.Compare(pks[i].Address(), pks[j].Address()) < 0 }) } pk := multisig.NewPubKeyMultisigThreshold(multisigThreshold, pks) if _, err := kb.CreateMulti(name, pk); err != nil { return err } cmd.PrintErrf("Key %q saved to disk.\n", name) return nil } // ask for a password when generating a local key if viper.GetString(FlagPublicKey) == "" && !viper.GetBool(flags.FlagUseLedger) { encryptPassword, err = input.GetCheckPassword( "Enter a passphrase to encrypt your key to disk:", "Repeat the passphrase:", inBuf) if err != nil { return err } } } if viper.GetString(FlagPublicKey) != "" { pk, err := sdk.GetAccPubKeyBech32(viper.GetString(FlagPublicKey)) if err != nil { return err } _, err = kb.CreateOffline(name, pk) if err != nil { return err } return nil } account := uint32(viper.GetInt(flagAccount)) index := uint32(viper.GetInt(flagIndex)) // If we're using ledger, only thing we need is the path and the bech32 prefix. if viper.GetBool(flags.FlagUseLedger) { bech32PrefixAccAddr := sdk.GetConfig().GetBech32AccountAddrPrefix() info, err := kb.CreateLedger(name, keys.Secp256k1, bech32PrefixAccAddr, account, index) if err != nil { return err } return printCreate(cmd, info, false, "") } // Get bip39 mnemonic var mnemonic string var bip39Passphrase string if interactive || viper.GetBool(flagRecover) { bip39Message := "Enter your bip39 mnemonic" if !viper.GetBool(flagRecover) { bip39Message = "Enter your bip39 mnemonic, or hit enter to generate one." } mnemonic, err = input.GetString(bip39Message, inBuf) if err != nil { return err } if !bip39.IsMnemonicValid(mnemonic) { return errors.New("invalid mnemonic") } } if len(mnemonic) == 0 { // read entropy seed straight from crypto.Rand and convert to mnemonic entropySeed, err := bip39.NewEntropy(mnemonicEntropySize) if err != nil { return err } mnemonic, err = bip39.NewMnemonic(entropySeed[:]) if err != nil { return err } } // override bip39 passphrase if interactive { bip39Passphrase, err = input.GetString( "Enter your bip39 passphrase. This is combined with the mnemonic to derive the seed. 
"+ "Most users should just hit enter to use the default, \"\"", inBuf) if err != nil { return err } // if they use one, make them re-enter it if len(bip39Passphrase) != 0 { p2, err := input.GetString("Repeat the passphrase:", inBuf) if err != nil { return err } if bip39Passphrase != p2 { return errors.New("passphrases don't match") } } } info, err := kb.CreateAccount(name, mnemonic, bip39Passphrase, encryptPassword, account, index) if err != nil { return err } // Recover key from seed passphrase if viper.GetBool(flagRecover) { // Hide mnemonic from output showMnemonic = false mnemonic = "" } return printCreate(cmd, info, showMnemonic, mnemonic) } func printCreate(cmd *cobra.Command, info keys.Info, showMnemonic bool, mnemonic string) error { output := viper.Get(cli.OutputFlag) switch output { case OutputFormatText: cmd.PrintErrln() printKeyInfo(info, keys.Bech32KeyOutput) // print mnemonic unless requested not to. if showMnemonic { cmd.PrintErrln("\n**Important** write this mnemonic phrase in a safe place.") cmd.PrintErrln("It is the only way to recover your account if you ever forget your password.") cmd.PrintErrln("") cmd.PrintErrln(mnemonic) } case OutputFormatJSON: out, err := keys.Bech32KeyOutput(info) if err != nil { return err } if showMnemonic { out.Mnemonic = mnemonic } var jsonString []byte if viper.GetBool(flags.FlagIndentResponse) { jsonString, err = cdc.MarshalJSONIndent(out, "", " ") } else { jsonString, err = cdc.MarshalJSON(out) } if err != nil { return err } cmd.PrintErrln(string(jsonString)) default: return fmt.Errorf("I can't speak: %s", output) } return nil }
{ cmd := &cobra.Command{ Use: "add <name>", Short: "Add an encrypted private key (either newly generated or recovered), encrypt it, and save to disk", Long: `Derive a new private key and encrypt to disk. Optionally specify a BIP39 mnemonic, a BIP39 passphrase to further secure the mnemonic, and a bip32 HD path to derive a specific account. The key will be stored under the given name and encrypted with the given password. The only input that is required is the encryption password. If run with -i, it will prompt the user for BIP44 path, BIP39 mnemonic, and passphrase. The flag --recover allows one to recover a key from a seed passphrase. If run with --dry-run, a key would be generated (or recovered) but not stored to the local keystore. Use the --pubkey flag to add arbitrary public keys to the keystore for constructing multisig transactions. You can add a multisig key by passing the list of key names you want the public key to be composed of to the --multisig flag and the minimum number of signatures required through --multisig-threshold. The keys are sorted by address, unless the flag --nosort is set. `, Args: cobra.ExactArgs(1), RunE: runAddCmd, } cmd.Flags().StringSlice(flagMultisig, nil, "Construct and store a multisig public key (implies --pubkey)") cmd.Flags().Uint(flagMultiSigThreshold, 1, "K out of N required signatures. For use in conjunction with --multisig") cmd.Flags().Bool(flagNoSort, false, "Keys passed to --multisig are taken in the order they're supplied") cmd.Flags().String(FlagPublicKey, "", "Parse a public key in bech32 format and save it to disk") cmd.Flags().BoolP(flagInteractive, "i", false, "Interactively prompt user for BIP39 passphrase and mnemonic") cmd.Flags().Bool(flags.FlagUseLedger, false, "Store a local reference to a private key on a Ledger device") cmd.Flags().Bool(flagRecover, false, "Provide seed phrase to recover existing key instead of creating") cmd.Flags().Bool(flagNoBackup, false, "Don't print out seed phrase (if others are watching the terminal)") cmd.Flags().Bool(flagDryRun, false, "Perform action, but don't add key to local keystore") cmd.Flags().Uint32(flagAccount, 0, "Account number for HD derivation") cmd.Flags().Uint32(flagIndex, 0, "Address index number for HD derivation") cmd.Flags().Bool(flags.FlagIndentResponse, false, "Add indent to JSON response") return cmd }
02_whole_body_from_image.py
# From Python # It requires OpenCV installed for Python import sys import cv2 import os from sys import platform import argparse # Import Openpose (Windows/Ubuntu/OSX) dir_path = os.path.dirname(os.path.realpath(__file__)) try: # Windows Import if platform == "win32": # Change these variables to point to the correct folder (Release/x64 etc.) sys.path.append(dir_path + '/../../python/openpose/Release'); os.environ['PATH'] = os.environ['PATH'] + ';' + dir_path + '/../../x64/Release;' + dir_path + '/../../bin;' import pyopenpose as op else: # Change these variables to point to the correct folder (Release/x64 etc.) sys.path.append('../../python'); # If you run `make install` (default path is `/usr/local/python` for Ubuntu), you can also access the OpenPose/python module from there. This will install OpenPose and the python library at your desired installation path. Ensure that this is in your python path in order to use it. # sys.path.append('/usr/local/python') from openpose import pyopenpose as op except ImportError as e: print('Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?') raise e # Flags parser = argparse.ArgumentParser() parser.add_argument("--image_path", default="../../../examples/media/COCO_val2014_000000000241.jpg", help="Process an image. Read all standard formats (jpg, png, bmp, etc.).") args = parser.parse_known_args() # Custom Params (refer to include/openpose/flags.hpp for more parameters) params = dict() params["model_folder"] = "../../../models/" params["face"] = True params["hand"] = True # Add others in path? for i in range(0, len(args[1])): curr_item = args[1][i] if i != len(args[1])-1: next_item = args[1][i+1] else: next_item = "1" if "--" in curr_item and "--" in next_item: key = curr_item.replace('-','') if key not in params: params[key] = "1" elif "--" in curr_item and "--" not in next_item: key = curr_item.replace('-','') if key not in params: params[key] = next_item # Construct it from system arguments # op.init_argv(args[1]) # oppython = op.OpenposePython()
opWrapper = op.WrapperPython() opWrapper.configure(params) opWrapper.start() # Process Image datum = op.Datum() imageToProcess = cv2.imread(args[0].image_path) datum.cvInputData = imageToProcess opWrapper.emplaceAndPop([datum]) # Display Image print("Body keypoints: \n" + str(datum.poseKeypoints)) print("Face keypoints: \n" + str(datum.faceKeypoints)) print("Left hand keypoints: \n" + str(datum.handKeypoints[0])) print("Right hand keypoints: \n" + str(datum.handKeypoints[1])) cv2.imshow("OpenPose 1.4.0 - Tutorial Python API", datum.cvOutputData) cv2.waitKey(0)
# Starting OpenPose
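Any other OpenPose flag can be supplied through the same params dict before opWrapper.configure(params); a hedged example (flag names should be verified against include/openpose/flags.hpp for your build):

# Illustrative only; set before opWrapper.configure(params).
params["net_resolution"] = "320x176"  # smaller net input trades accuracy for speed
params["disable_blending"] = False    # True would render keypoints on a black background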
Text.d.ts
// import * as Phaser from 'phaser'; import CanvasGameObjectBase from '../../../utils/types/CanvasGameObjectBase'; export default Text; declare namespace Text { type MetricsType = { ascent: number, descent: number, fontSize: number }; type FontConfigType = string | { fontFamily?: string, fontSize?: string, fontStyle?: string }; type TextMarginsType = { left?: number }; interface TextStyle { fontFamily?: string, fontSize?: string, fontStyle?: string, backgroundColor?: null | string | number, backgroundColor2?: null | string | number, backgroundHorizontalGradient?: boolean, backgroundStrokeColor?: null | string | number, backgroundStrokeLineWidth?: number, backgroundCornerRadius?: number, backgroundCornerIteration?: null | number, color?: null | string | number, fill?: null | string | number, stroke?: null | string | number, strokeThickness?: number, shadow?: { offsetX?: number, offsetY?: number, color?: number | string, blur?: number, stroke?: false, fill?: false }, underline?: { color?: number | string, thickness?: number, offset?: number, }, align?: 'left' | 'center' | 'right', halign?: 'left' | 'center' | 'right', valign?: 'top' | 'center' | 'bottom', padding?: { left?: number, right?: number, top?: number, bottom?: number, }, maxLines?: number, lineSpacing?: number, fixedWidth?: number, fixedHeight?: number, resolution?: number, testString?: string, wrap?: { mode?: 0 | 1 | 2 | 'none' | 'word' | 'char' | 'character' width?: null | number, }, metrics?: boolean | { ascent: number, descent: number, fontSize: number }, } namespace Events { type AnyAreaCallbackType = ( key: string, pointer: Phaser.Input.Pointer, localX: number, localY: number ) => void; type AreaCallbackType = ( pointer: Phaser.Input.Pointer, localX: number, localY: number ) => void; } } declare class
extends CanvasGameObjectBase { constructor( scene: Phaser.Scene, x?: number, y?: number, content?: string, style?: Text.TextStyle ); text: string; setText(text: string | number | string[]): this; appendText(text: string | number | string[]): this; getPlainText( text?: string | undefined, start?: number, end?: number ): string; getWrappedText( text?: string | undefined, start?: number, end?: number ): string; getText( text?: string | undefined, start?: number, end?: number ): string; getSubString( text?: string | undefined, start?: number, end?: number ): string; updateText(runWrap?: boolean): this; setWrapMode( mode: 0 | 1 | 2 | 'none' | 'word' | 'char' | 'character' ): this; setWrapWidth(width: number): this; setFont(font: Text.FontConfigType): this; setFontFamily(family: string): this; setFontSize(size: number | string): this; setFontStyle(style: string): this; setStyle(style: Text.TextStyle): this; setTestString(string: string): this; setColor( color?: null | string | number ): this; setFill( color?: null | string | number ): this; setStroke( color?: null | string | number, thickness?: number ): this; setUnderline( color?: null | string | number, thickness?: number, ofset?: number ): this; setUnderlineColor( color?: null | string | number ): this; setUnderlineThinkness(thickness: number): this; setUnderlineOffset(ofset: number): this; setBackgroundColor( color?: null | string | number, color2?: null | string | number, isHorizontalGradient?: boolean ): this; setBackgroundStrokeColor( color?: null | string | number, lineWidth?: number ): this; setBackgroundCornerRadius( radius?: number, iteration?: number ): this; setShadow( x?: number, y?: number, color?: null | string | number, blur?: number, shadowStroke?: boolean, shadowFill?: boolean ): this; setShadowOffset(x: number, y: number): this; setShadowColor(color?: null | string | number): this; setShadowBlur(blur: number): this; setShadowStroke(enabled?: boolean): this; setShadowFill(enabled?: boolean): this; setAlign(align?: 'left' | 'center' | 'right'): this; setHAlign(align?: 'left' | 'center' | 'right'): this; setVAlign(align?: 'top' | 'center' | 'bottom'): this; addImage( imgKey: string, config?: { key: string, frame?: string, width?: number, height?: number, y?: number, left?: number, right?: number, } ): this; drawAreaBounds( graphics: Phaser.GameObjects.Graphics, color?: number ): this; setLineSpacing(value: number): this; setXOffset(value: number): this; setPadding( left?: number | { left?: number, right?: number, top?: number, bottom?: number }, top?: number, right?: number, bottom?: number, ): this; setMaxLines(max?: number): this; measureTextMargins( testString: string, out?: Text.TextMarginsType ): Text.TextMarginsType; setResolution(value: number): this; setFixedSize(width?: number, height?: number): this; setSize(width?: number, height?: number): this; resize(width?: number, height?: number): this; getTextMetrics(): Text.MetricsType; setTextMetrics( metrics: Text.MetricsType, font: Text.FontConfigType ): this; style: { color: string | null, stroke: string | null, strokeThickness: number, underlineColor: string | null, underlineThickness: number, underlineOffset: number, backgroundColor: string | null, backgroundColor2: string | null, backgroundHorizontalGradient: boolean, backgroundStrokeColor: string | null, backgroundStrokeLineWidth: number, backgroundCornerRadius: number, backgroundCornerIteration: number | undefined, shadowColor: string | null, shadowOffsetX: number, shadowOffsetY: number, shadowBlur: number, 
shadowStroke: boolean, shadowFill: boolean, lineSpacing: number, maxLines: number, resolution: number, fixedWidth: number, fixedHeight: number, halign: string, valign: string, wrapWidth: number | null, wrapMode: number }; padding: { left: number, right: number, top: number, bottom: number }; lineSpacing: number; }
Text
http_compress.controller.ts
/** * Copyright 2019 F5 Networks, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import {Filter, repository} from '@loopback/repository'; import { post, param, get, getFilterSchemaFor, del, requestBody, HttpErrors, RequestContext, RestBindings, } from '@loopback/rest'; import {ProfileHTTPCompressionRepository} from '../../repositories'; import {Schema, Response, CollectionResponse} from '..'; import {BaseController} from '../base.controller'; import {inject} from '@loopback/core'; import {ProfileHTTPCompression} from '../../models'; const prefix = '/adcaas/v1/profiles'; const createDesc: string = 'ProfileHTTPCompress resource that need to be created'; export class ProfileHTTPCompressionController extends BaseController { constructor( @inject(RestBindings.Http.CONTEXT) protected reqCxt: RequestContext, @repository(ProfileHTTPCompressionRepository) private profileHTTPCompressionRepository: ProfileHTTPCompressionRepository, ) { super(reqCxt); } @post(prefix + '/http_compress_profiles', { responses: { '200': Schema.response( ProfileHTTPCompression, 'Successfully create HTTP_Compress profile resource', ), '400': Schema.badRequest('Invalid profile resource'), '422': Schema.unprocessableEntity('Unprocessable profile resource'), }, }) async create( @requestBody(Schema.createRequest(ProfileHTTPCompression, createDesc)) reqBody: Partial<ProfileHTTPCompression>, ): Promise<Response> { try { reqBody.tenantId = await this.tenantId; const data = await this.profileHTTPCompressionRepository.create(reqBody); return new Response(ProfileHTTPCompression, data); } catch (error) { throw new HttpErrors.BadRequest(error.message); } } @get(prefix + '/http_compress_profiles', {
),
    },
  })
  async find(
    @param.query.object('filter', getFilterSchemaFor(ProfileHTTPCompression))
    filter?: Filter,
  ): Promise<CollectionResponse> {
    const data = await this.profileHTTPCompressionRepository.find(filter, {
      tenantId: await this.tenantId,
    });
    return new CollectionResponse(ProfileHTTPCompression, data);
  }

  @del(prefix + '/http_compress_profiles/{profileId}', {
    responses: {
      '204': Schema.emptyResponse(
        'Successfully delete ProfileHTTPCompress resource',
      ),
      '404': Schema.notFound('Cannot find ProfileHTTPCompress resource'),
    },
  })
  async deleteById(
    @param(Schema.pathParameter('profileId', 'ProfileHTTPCompress resource ID'))
    id: string,
  ): Promise<void> {
    await this.profileHTTPCompressionRepository.deleteById(id, {
      tenantId: await this.tenantId,
    });
  }
}
responses: {
      '200': Schema.collectionResponse(
        ProfileHTTPCompression,
        'Successfully retrieve ProfileHTTPCompress resources',
dtls.go
package dtls

const (
	// On the wire, DTLS versions are the one's complement of the
	// {major, minor} version pair: 1.0 -> {254, 255}, 1.2 -> {254, 253}.
	VersionDTLS10 uint16 = 0xfeff
	VersionDTLS12 uint16 = 0xfefd
)
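The inverted numbering can be verified directly; a throwaway Python probe (not part of this package):

# DTLS wire versions are the one's complement of {major, minor}.
assert (~0x0100) & 0xFFFF == 0xFEFF  # DTLS 1.0 <- {1, 0}
assert (~0x0102) & 0xFFFF == 0xFEFD  # DTLS 1.2 <- {1, 2}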
HTMLFetcher.ts
import Fetcher, { FetchParam } from './Fetcher';

class HTMLFetcher extends Fetcher {
  core: Window = window;
  }
}

export default HTMLFetcher;
  fetch(fetchParam: FetchParam): Promise<unknown> {
    return this.core.fetch(fetchParam.uri, { headers: fetchParam.headers });
MailListCtrl.js
(function () {
  'use strict';

  angular.module('OwlAdmin.pages.mail')
      .controller('MailListCtrl', MailListCtrl);

  /** @ngInject */
  }
})();
  function MailListCtrl($stateParams, mailMessages) {
    var vm = this;

    vm.messages = mailMessages.getMessagesByLabel($stateParams.label);
    vm.label = $stateParams.label;
keypool.rs
use util::u8_vector;
use overflow::{WrappedStep, WrappedInc};
use std::fmt;
use rustc_serialize::json::{Json, ToJson};
use std::collections::BTreeMap;
use std::u8;

#[derive(Debug, Clone, RustcDecodable)]
pub struct KeyPool {
    pub dynamic_ms_cap: u8,
    pub dynamic_ms_start: u8,
    pub static_ms_bytes: Vec<u8>,
    pub dynamic_bytes: Vec<u8>,
    pub static_ls_bytes: Vec<u8>,
}

impl KeyPool {
    pub fn new(ms_bytes: u8, dy_bytes: u8, ls_bytes: u8) -> KeyPool {
        KeyPool {
            dynamic_ms_cap: 0, // 0 means the value will eventually wrap around
            dynamic_ms_start: 0,
            static_ms_bytes: u8_vector(ms_bytes),
            dynamic_bytes: u8_vector(dy_bytes),
            static_ls_bytes: u8_vector(ls_bytes),
        }
    }

    /// Splits the key space into `partition_count` sub-pools by stepping the
    /// most significant dynamic byte, wrapping around at 256.
    pub fn split_key(&self, partition_count: u64) -> Vec<KeyPool> {
        let mut output = vec![];
        let mut dynamic_bytes = self.dynamic_bytes.clone();
        let step = if self.dynamic_ms_cap == 0 {
            ((256 - self.dynamic_bytes[0] as u64) / partition_count) as u8
        } else {
            ((self.dynamic_ms_cap as u64 + 1) / partition_count) as u8
        };

        for _ in 0..partition_count {
            let db = dynamic_bytes.clone();
            let cap = dynamic_bytes[0].step(&step);
            output.push(KeyPool {
                dynamic_ms_cap: cap,
                dynamic_ms_start: db[0],
                static_ms_bytes: self.static_ms_bytes.clone(),
                dynamic_bytes: db,
                static_ls_bytes: self.static_ls_bytes.clone(),
            });
            dynamic_bytes[0] = dynamic_bytes[0].step(&step);
        }

        output
    }

    pub fn is_cap_reached(&self) -> bool {
        self.dynamic_bytes[0] == self.dynamic_ms_cap
    }

    pub fn is_done(&self) -> bool {
        let mut done = true;
        for idx in 1..self.dynamic_bytes.len() {
            if self.dynamic_bytes[idx] != 255 {
                done = false;
                break;
            }
        }
        done && self.is_cap_reached()
    }

    #[allow(unused)]
    pub fn to_vec(&self) -> Vec<u8> {
        let mut output: Vec<u8> = vec![];
        for b in self.static_ms_bytes.clone() {
            output.push(b);
        }
        for b in self.dynamic_bytes.clone() {
            output.push(b);
        }
        for b in self.static_ls_bytes.clone() {
            output.push(b);
        }
        output
    }

    /// Increments the dynamic bytes to emulate counting
    pub fn inc(&self) -> KeyPool {
        let mut slf = self.clone();
        // guard against an empty pool before indexing into it
        if slf.dynamic_bytes.is_empty() {
            return slf;
        }
        let mut idx = slf.dynamic_bytes.len() - 1;
        loop {
            let old_value = slf.dynamic_bytes[idx];
            slf.dynamic_bytes[idx] = slf.dynamic_bytes[idx].inc();
            // carry into the next byte when this one wrapped around
            let wrapped = old_value > slf.dynamic_bytes[idx];
            if wrapped && idx != 0 {
                idx -= 1;
            } else {
                break;
            }
        }
        slf
    }
}

impl fmt::Display for KeyPool {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f,
               "({},{}:{:?})",
               self.dynamic_ms_cap,
               self.dynamic_ms_start,
               self.dynamic_bytes)
    }
}

impl Iterator for KeyPool {
    type Item = KeyPool;

    fn next(&mut self) -> Option<Self::Item> {
        if !self.is_done() {
            // advance the pool itself so that iteration makes progress
            *self = self.inc();
            Some(self.clone())
        } else {
            None
        }
    }
}

impl ToJson for KeyPool {
    fn to_json(&self) -> Json {
        let mut d = BTreeMap::new();
        d.insert("dynamic_ms_cap".to_string(), self.dynamic_ms_cap.to_json());
        d.insert("dynamic_ms_start".to_string(), self.dynamic_ms_start.to_json());
        d.insert("static_ms_bytes".to_string(),
d.insert("dynamic_bytes".to_string(), self.dynamic_bytes.to_json()); d.insert("static_ls_bytes".to_string(), self.static_ls_bytes.to_json()); Json::Object(d) } } #[test] fn test_split_key() { let mut key = KeyPool::new(14, 2, 0); let mut keys = key.split_key(2); println!("{:?}", keys); assert!(keys[0].dynamic_bytes[0] == 0); assert!(keys[0].dynamic_ms_cap == 128); assert!(keys[1].dynamic_bytes[0] == 128); assert!(keys[1].dynamic_ms_cap == 0); key = keys[0].clone(); keys = key.split_key(2); assert!(keys[0].dynamic_bytes[0] == 0); assert!(keys[0].dynamic_ms_cap == 64); assert!(keys[1].dynamic_bytes[0] == 64); assert!(keys[1].dynamic_ms_cap == 128); }
self.static_ms_bytes.to_json());
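The first-byte stepping behind split_key (a cap of 0 stands for the full wrap-around range) can be sanity-checked with plain modular arithmetic; a hedged Python sketch that mirrors test_split_key:

# Illustrative only: reproduces the two-way split of the full 0..256 range.
start, cap, n = 0, 0, 2
step = (256 - start) // n if cap == 0 else (cap + 1) // n
starts = [(start + i * step) % 256 for i in range(n)]
caps = [(s + step) % 256 for s in starts]
assert starts == [0, 128] and caps == [128, 0]  # matches test_split_key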
mod.rs
mod status;
mod thresholds;

pub use status::MeasurementStatus;
pub use thresholds::MeasurementThresholds;

use std::{fmt, time::Duration};

#[derive(Clone, Debug)]
pub struct Measurement {
    info: String,
    actual: Duration,
    thresholds: MeasurementThresholds,
}

impl Measurement {
    pub fn new(info: String, actual: Duration, thresholds: MeasurementThresholds) -> Self {
        Self {
            info,
            actual,
            thresholds,
        }
    }

    pub fn
(&self) -> String { self.info.clone() } pub fn actual(&self) -> Duration { self.actual } pub fn thresholds(&self) -> MeasurementThresholds { self.thresholds.clone() } pub fn result(&self) -> MeasurementStatus { self.thresholds.status(&self.actual) } } impl fmt::Display for Measurement { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( f, "Measurement: {}. Result: {}. Actual: {:.3} s. Thresholds: {}", self.info(), self.result().to_string(), self.actual.as_millis() as f32 / 1000.0, self.thresholds ) } }
info
esera-bridge.rs
#[macro_use] extern crate log; use anyhow::{Context, Result}; use crossbeam::channel::{self, Receiver, Sender}; use std::fmt; use std::net::ToSocketAddrs; use std::thread; use std::time::Duration; use structopt::StructOpt; use thiserror::Error; use esera_mqtt::{ Bus, ControllerConnection, ControllerError, Device, MqttConnection, MqttMsg, Routes, OW, }; #[derive(Error, Debug)] pub enum Error { #[error("Controller channel closed")] ChanClosed, #[error("MQTT broker connection closed")] MqttClosed, } #[derive(StructOpt, Clone, Debug)] struct Opt { /// Host name or IP address of a ESERA controller /// /// Can optionally contain a port number separated with ":". If no port number is given, the /// default port number applies. #[structopt(value_name = "HOST|IP[:PORT]")] controller: String, /// Port number #[structopt(short = "p", long, default_value = "5000")] default_port: u16, /// MQTT broker address #[structopt(short = "H", long, default_value = "localhost", env = "MQTT_HOST")] mqtt_host: String, /// MQTT credentials (username:password) #[structopt(short = "C", long, default_value = "", env = "MQTT_CRED")] mqtt_cred: String, } type ChannelPair<O, I> = (Sender<O>, Receiver<I>); fn ctrl_loop<'a, A>(addr: A) -> Result<ChannelPair<String, Result<OW, ControllerError>>> where A: ToSocketAddrs + Clone + fmt::Debug + Send + 'a, { let (up_tx, up_rx) = channel::unbounded(); let (down_tx, down_rx) = channel::unbounded(); let mut c = ControllerConnection::new(addr)?; // this is going to trigger registration which will be handled via ordinary event processing down_tx.send(c.csi()).ok(); down_tx.send(c.list()).ok(); thread::spawn(move || { if let Err(e) = c.event_loop(up_rx, down_tx) { error!("[{}] Controller event loop died: {}", c.contno, e) } }); Ok((up_tx, down_rx)) } struct App { opt: Opt, ctrl_tx: Sender<String>, ctrl_rx: Receiver<Result<OW, ControllerError>>, bus: Bus, routes: Routes<usize>, } impl App { fn new(opt: &Opt) -> Result<Self> { let (ctrl_tx, ctrl_rx) = if opt.controller.find(':').is_some() { ctrl_loop(opt.controller.as_str()) } else { ctrl_loop((opt.controller.as_str(), opt.default_port)) } .context("Failed to set up initial controller connection")?; Ok(Self { opt: opt.clone(), ctrl_tx, ctrl_rx, bus: Bus::default(), routes: Routes::new(), }) } fn handle(&mut self) -> Result<()> { // process first controller message separately to figure out controller number let resp = self.ctrl_rx.recv().map_err(|_| Error::ChanClosed)??; let contno = resp.contno; let (mut mqtt, mqtt_chan) = MqttConnection::new( &self.opt.mqtt_host, &self.opt.mqtt_cred, format!("ESERA/{}/status", contno), None, )?; self.bus.handle_1wire(resp, &mut self.routes)?; let mut sel = channel::Select::new(); let mqtt_idx = sel.recv(&mqtt_chan); let ctrl_idx = sel.recv(&self.ctrl_rx); loop { let op = sel.select(); match op.index() { i if i == ctrl_idx => { match op.recv(&self.ctrl_rx).map_err(|_| Error::ChanClosed)? { Ok(resp) => self .bus .handle_1wire(resp, &mut self.routes)? .send(&mut mqtt, &self.ctrl_tx)?, Err(ControllerError::Transport(e)) => { error!("[{}] No data received from controller ({})", contno, e); return Err(Error::ChanClosed.into()); } Err(e) => warn!("[{}] Controller read: {}", contno, e), }; } i if i == mqtt_idx => { let msg = op.recv(&mqtt_chan).map_err(|_| Error::MqttClosed)?; match msg { MqttMsg::Pub { ref topic, .. } => { for (dev, tok) in self.routes.lookup(topic) { self.bus.devices[*dev] .handle_mqtt(&msg, *tok)? .send(&mut mqtt, &self.ctrl_tx)? 
} } MqttMsg::Reconnected => { info!("Renewing MQTT subscriptions"); for msg in self.routes.subscriptions() { mqtt.send(msg)?; } } _ => (), // ignore } } _ => panic!("BUG: unknown select() channel index"), } } } } fn
(opt: Opt) -> Result<()> { debug!("Entering main event loop"); loop { match App::new(&opt).and_then(|mut app| app.handle()) { Ok(_) => return Ok(()), Err(e) => error!("{}", e), } warn!("Connection lost, retrying in 5s"); thread::sleep(Duration::from_secs(5)); } } fn main() { dotenv::dotenv().ok(); env_logger::builder().format_timestamp(None).init(); if let Err(e) = run(Opt::from_args()) { error!("FATAL: {}", e); std::process::exit(1) } }
run
stats.go
package main import ( "net" "github.com/cestlascorpion/offlinepush/core" "github.com/cestlascorpion/offlinepush/proto" "github.com/cestlascorpion/offlinepush/stats" "github.com/jinzhu/configor" log "github.com/sirupsen/logrus" "google.golang.org/grpc" "google.golang.org/grpc/reflection" ) func main()
{ lis, err := net.Listen("tcp", core.StatsServerAddr) if err != nil { log.Fatalf("listen failed err %+v", err) return } conf := &core.PushConfig{} err = configor.Load(conf, "conf.json") if err != nil { log.Fatalf("config failed err %+v", err) return } svr, err := stats.NewServer(conf) if err != nil { log.Fatalf("new server failed err %+v", err) return } defer svr.Close() s := grpc.NewServer() proto.RegisterStatsServer(s, svr) reflection.Register(s) err = s.Serve(lis) if err != nil { log.Fatalf("serve failed err %+v", err) return } }
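// Hedged sketch (added for illustration, not part of the original file): a
// minimal client dialing the stats server started above. proto.NewStatsClient
// is assumed to exist because RegisterStatsServer is generated alongside it;
// the concrete RPC methods are not shown in this file, so none are called.
func exampleDial() {
	conn, err := grpc.Dial(core.StatsServerAddr, grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial failed err %+v", err)
	}
	defer conn.Close()
	client := proto.NewStatsClient(conn)
	_ = client // invoke the generated RPCs here
}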
store.go
package sweep import ( "bytes" "encoding/binary" "errors" "fmt" "github.com/Actinium-project/acmd/chaincfg/chainhash" "github.com/Actinium-project/acmd/wire" "github.com/coreos/bbolt" ) var ( // lastTxBucketKey is the key that points to a bucket containing a // single item storing the last published tx. // // maps: lastTxKey -> serialized_tx lastTxBucketKey = []byte("sweeper-last-tx") // lastTxKey is the fixed key under which the serialized tx is stored. lastTxKey = []byte("last-tx") // txHashesBucketKey is the key that points to a bucket containing the // hashes of all sweep txes that were published successfully. // // maps: txHash -> empty slice txHashesBucketKey = []byte("sweeper-tx-hashes") // utxnChainPrefix is the bucket prefix for nursery buckets. utxnChainPrefix = []byte("utxn") // utxnHeightIndexKey is the sub bucket where the nursery stores the // height index. utxnHeightIndexKey = []byte("height-index") // utxnFinalizedKndrTxnKey is a static key that can be used to locate // the nursery finalized kindergarten sweep txn. utxnFinalizedKndrTxnKey = []byte("finalized-kndr-txn") byteOrder = binary.BigEndian ) // SweeperStore stores published txes. type SweeperStore interface { // IsOurTx determines whether a tx is published by us, based on its // hash. IsOurTx(hash chainhash.Hash) (bool, error) // NotifyPublishTx signals that we are about to publish a tx. NotifyPublishTx(*wire.MsgTx) error // GetLastPublishedTx returns the last tx that we called NotifyPublishTx // for. GetLastPublishedTx() (*wire.MsgTx, error) } type sweeperStore struct { db *bbolt.DB } // NewSweeperStore returns a new store instance. func NewSweeperStore(db *bbolt.DB, chainHash *chainhash.Hash) ( SweeperStore, error) { err := db.Update(func(tx *bbolt.Tx) error { _, err := tx.CreateBucketIfNotExists( lastTxBucketKey, ) if err != nil { return err } if tx.Bucket(txHashesBucketKey) != nil { return nil } txHashesBucket, err := tx.CreateBucket(txHashesBucketKey) if err != nil { return err } // Use non-existence of tx hashes bucket as a signal to migrate // nursery finalized txes. err = migrateTxHashes(tx, txHashesBucket, chainHash) return err }) if err != nil { return nil, err } return &sweeperStore{ db: db, }, nil } // migrateTxHashes migrates nursery finalized txes to the tx hashes bucket. This // is not implemented as a database migration, to keep the downgrade path open. func migrateTxHashes(tx *bbolt.Tx, txHashesBucket *bbolt.Bucket, chainHash *chainhash.Hash) error { log.Infof("Migrating UTXO nursery finalized TXIDs") // Compose chain bucket key. var b bytes.Buffer if _, err := b.Write(utxnChainPrefix); err != nil { return err } if _, err := b.Write(chainHash[:]); err != nil { return err } // Get chain bucket if exists. chainBucket := tx.Bucket(b.Bytes()) if chainBucket == nil { return nil } // Retrieve the existing height index. hghtIndex := chainBucket.Bucket(utxnHeightIndexKey) if hghtIndex == nil { return nil } // Retrieve all heights. err := hghtIndex.ForEach(func(k, v []byte) error { heightBucket := hghtIndex.Bucket(k) if heightBucket == nil { return nil } // Get finalized tx for height. txBytes := heightBucket.Get(utxnFinalizedKndrTxnKey) if txBytes == nil { return nil } // Deserialize and skip tx if it cannot be deserialized. tx := &wire.MsgTx{} err := tx.Deserialize(bytes.NewReader(txBytes)) if err != nil { log.Warnf("Cannot deserialize utxn tx") return nil } // Calculate hash. hash := tx.TxHash() // Insert utxn tx hash in hashes bucket. 
log.Debugf("Inserting nursery tx %v in hash list "+ "(height=%v)", hash, byteOrder.Uint32(k)) return txHashesBucket.Put(hash[:], []byte{}) }) if err != nil { return err } return nil } // NotifyPublishTx signals that we are about to publish a tx. func (s *sweeperStore) NotifyPublishTx(sweepTx *wire.MsgTx) error { return s.db.Update(func(tx *bbolt.Tx) error { lastTxBucket := tx.Bucket(lastTxBucketKey) if lastTxBucket == nil { return errors.New("last tx bucket does not exist") } txHashesBucket := tx.Bucket(txHashesBucketKey) if txHashesBucket == nil { return errors.New("tx hashes bucket does not exist") } var b bytes.Buffer if err := sweepTx.Serialize(&b); err != nil { return err } if err := lastTxBucket.Put(lastTxKey, b.Bytes()); err != nil { return err } hash := sweepTx.TxHash() return txHashesBucket.Put(hash[:], []byte{}) }) } // GetLastPublishedTx returns the last tx that we called NotifyPublishTx // for. func (s *sweeperStore) GetLastPublishedTx() (*wire.MsgTx, error) { var sweepTx *wire.MsgTx err := s.db.View(func(tx *bbolt.Tx) error { lastTxBucket := tx.Bucket(lastTxBucketKey) if lastTxBucket == nil { return errors.New("last tx bucket does not exist") } sweepTxRaw := lastTxBucket.Get(lastTxKey) if sweepTxRaw == nil { return nil }
} return nil }) if err != nil { return nil, err } return sweepTx, nil } // IsOurTx determines whether a tx is published by us, based on its // hash. func (s *sweeperStore) IsOurTx(hash chainhash.Hash) (bool, error) { var ours bool err := s.db.View(func(tx *bbolt.Tx) error { txHashesBucket := tx.Bucket(txHashesBucketKey) if txHashesBucket == nil { return errors.New("tx hashes bucket does not exist") } ours = txHashesBucket.Get(hash[:]) != nil return nil }) if err != nil { return false, err } return ours, nil } // Compile-time constraint to ensure sweeperStore implements SweeperStore. var _ SweeperStore = (*sweeperStore)(nil)
sweepTx = &wire.MsgTx{} txReader := bytes.NewReader(sweepTxRaw) if err := sweepTx.Deserialize(txReader); err != nil { return fmt.Errorf("tx deserialize: %v", err)
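// Hedged usage sketch (added for illustration, not part of the original
// file): the intended call order of the SweeperStore interface above. The
// db, chainHash and sweepTx values are assumed to be supplied by the caller.
func exampleSweeperStore(db *bbolt.DB, chainHash *chainhash.Hash, sweepTx *wire.MsgTx) error {
	store, err := NewSweeperStore(db, chainHash)
	if err != nil {
		return err
	}
	// Record the tx before broadcasting so IsOurTx can recognize it later.
	if err := store.NotifyPublishTx(sweepTx); err != nil {
		return err
	}
	ours, err := store.IsOurTx(sweepTx.TxHash())
	if err != nil {
		return err
	}
	_ = ours // true for a tx recorded via NotifyPublishTx
	return nil
}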
many_connections.rs
#![cfg(feature = "rustls")] use std::{ sync::{Arc, Mutex}, time::Duration, }; use crc::crc32; use futures::{future, FutureExt, StreamExt, TryFutureExt, TryStreamExt}; use quinn::{ConnectionError, ReadError, WriteError}; use rand::{self, RngCore}; use tokio::runtime::Builder; use unwrap::unwrap; struct
{ errors: Vec<ConnectionError>, } #[test] #[ignore] fn connect_n_nodes_to_1_and_send_1mb_data() { tracing::subscriber::set_global_default( tracing_subscriber::FmtSubscriber::builder() .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) .finish(), ) .unwrap(); let runtime = unwrap!(Builder::new_current_thread().enable_all().build()); let shared = Arc::new(Mutex::new(Shared { errors: vec![] })); let (cfg, listener_cert) = configure_listener(); let mut ep_builder = quinn::Endpoint::builder(); ep_builder.listen(cfg); let (endpoint, incoming_conns) = { let _guard = runtime.enter(); unwrap!(ep_builder.bind(&"127.0.0.1:0".parse().unwrap())) }; let listener_addr = unwrap!(endpoint.local_addr()); let expected_messages = 50; let shared2 = shared.clone(); let read_incoming_data = incoming_conns .filter_map(|connect| connect.map(|x| x.ok())) .take(expected_messages) .for_each(move |new_conn| { let conn = new_conn.connection; let shared = shared2.clone(); let task = new_conn .uni_streams .try_for_each(move |stream| { let conn = conn.clone(); read_from_peer(stream).map(move |_| { conn.close(0u32.into(), &[]); Ok(()) }) }) .unwrap_or_else(move |e| { shared.lock().unwrap().errors.push(e); }); tokio::spawn(task); future::ready(()) }); runtime.spawn(read_incoming_data); let client_cfg = configure_connector(&listener_cert); for _ in 0..expected_messages { let data = random_data_with_hash(1024 * 1024); let shared = shared.clone(); let task = unwrap!(endpoint.connect_with(client_cfg.clone(), &listener_addr, "localhost")) .map_err(WriteError::ConnectionClosed) .and_then(move |new_conn| write_to_peer(new_conn.connection, data)) .unwrap_or_else(move |e| { use quinn::ConnectionError::*; match e { WriteError::ConnectionClosed(ApplicationClosed { .. }) | WriteError::ConnectionClosed(Reset) => {} WriteError::ConnectionClosed(e) => shared.lock().unwrap().errors.push(e), _ => panic!("unexpected write error"), } }); runtime.spawn(task); } runtime.block_on(endpoint.wait_idle()); let shared = shared.lock().unwrap(); if !shared.errors.is_empty() { panic!("some connections failed: {:?}", shared.errors); } } async fn read_from_peer(stream: quinn::RecvStream) -> Result<(), quinn::ConnectionError> { match stream.read_to_end(1024 * 1024 * 5).await { Ok(data) => { assert!(hash_correct(&data)); Ok(()) } Err(e) => { use quinn::ReadToEndError::*; use ReadError::*; match e { TooLong | Read(UnknownStream) | Read(ZeroRttRejected) | Read(IllegalOrderedRead) => unreachable!(), Read(Reset(error_code)) => panic!("unexpected stream reset: {}", error_code), Read(ConnectionClosed(e)) => Err(e), } } } } async fn write_to_peer(conn: quinn::Connection, data: Vec<u8>) -> Result<(), WriteError> { let mut s = conn .open_uni() .await .map_err(WriteError::ConnectionClosed)?; s.write_all(&data).await?; // Suppress finish errors, since the peer may close before ACKing match s.finish().await { Ok(()) => Ok(()), Err(WriteError::ConnectionClosed(ConnectionError::ApplicationClosed { .. })) => Ok(()), Err(e) => Err(e), } } /// Builds client configuration. Trusts given node certificate. 
fn configure_connector(node_cert: &[u8]) -> quinn::ClientConfig { let mut peer_cfg_builder = quinn::ClientConfigBuilder::default(); let their_cert = unwrap!(quinn::Certificate::from_der(node_cert)); unwrap!(peer_cfg_builder.add_certificate_authority(their_cert)); let mut peer_cfg = peer_cfg_builder.build(); let transport_config = unwrap!(Arc::get_mut(&mut peer_cfg.transport)); transport_config .max_idle_timeout(Some(Duration::from_secs(20))) .unwrap(); peer_cfg } /// Builds listener configuration along with its certificate. fn configure_listener() -> (quinn::ServerConfig, Vec<u8>) { let (our_cert_der, our_priv_key) = gen_cert(); let our_cert = unwrap!(quinn::Certificate::from_der(&our_cert_der)); let our_cfg = Default::default(); let mut our_cfg_builder = quinn::ServerConfigBuilder::new(our_cfg); unwrap!(our_cfg_builder.certificate( quinn::CertificateChain::from_certs(vec![our_cert]), our_priv_key )); let mut our_cfg = our_cfg_builder.build(); let transport_config = unwrap!(Arc::get_mut(&mut our_cfg.transport)); transport_config .max_idle_timeout(Some(Duration::from_secs(20))) .unwrap(); (our_cfg, our_cert_der) } fn gen_cert() -> (Vec<u8>, quinn::PrivateKey) { let cert = unwrap!(rcgen::generate_simple_self_signed(vec![ "localhost".to_string() ])); let key = unwrap!(quinn::PrivateKey::from_der( &cert.serialize_private_key_der() )); (unwrap!(cert.serialize_der()), key) } /// Constructs a buffer with random bytes of given size prefixed with a hash of this data. fn random_data_with_hash(size: usize) -> Vec<u8> { let mut data = random_vec(size + 4); let hash = crc32::checksum_ieee(&data[4..]); // write hash in big endian data[0] = (hash >> 24) as u8; data[1] = ((hash >> 16) & 0xff) as u8; data[2] = ((hash >> 8) & 0xff) as u8; data[3] = (hash & 0xff) as u8; data } /// Checks if given data buffer hash is correct. Hash itself is a 4 byte prefix in the data. fn hash_correct(data: &[u8]) -> bool { let encoded_hash = ((data[0] as u32) << 24) | ((data[1] as u32) << 16) | ((data[2] as u32) << 8) | data[3] as u32; let actual_hash = crc32::checksum_ieee(&data[4..]); encoded_hash == actual_hash } #[allow(unsafe_code)] fn random_vec(size: usize) -> Vec<u8> { let mut ret = Vec::with_capacity(size); unsafe { ret.set_len(size) }; rand::thread_rng().fill_bytes(&mut ret[..]); ret }
Shared
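// Hedged sketch (added for illustration, not part of the original test): a
// round-trip check of the 4-byte big-endian CRC32 prefix produced by
// random_data_with_hash and verified by hash_correct above.
#[test]
fn crc_prefix_round_trip() {
    let data = random_data_with_hash(64);
    assert!(hash_correct(&data));
    // Flipping any payload byte must invalidate the stored checksum.
    let mut corrupted = data;
    corrupted[4] ^= 0xff;
    assert!(!hash_correct(&corrupted));
}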
OutlineAddToHomeScreen.js
import { FilledAddToHomeScreen as OutlineAddToHomeScreen } from './FilledAddToHomeScreen'; export { OutlineAddToHomeScreen };
AddPhysicsWorker.js
'use strict'; var transferableMessage = self.webkitPostMessage || self.postMessage, // enum MESSAGE_TYPES = { WORLDREPORT: 0, COLLISIONREPORT: 1, VEHICLEREPORT: 2, CONSTRAINTREPORT: 3 }, // temp variables _object, _vector, _transform, knownConstraints = {}, // functions public_functions = {}, createShape, reportWorld, reportCollisions, // world variables fixedTimeStep, // used when calling stepSimulation rateLimit, // sets whether or not to sync the simulation rate with fixedTimeStep last_simulation_time, last_simulation_duration = 0, world, transform, _vec3_1, _vec3_2, _vec3_3, _quat, // private cache _objects = {}, _materials = {}, _objects_ammo = {}, _num_objects = 0, _object_shapes = {}, // The following objects are to track objects that ammo.js doesn't clean // up. All are cleaned up when their corresponding body is destroyed. // Unfortunately, it's very difficult to get at these objects from the // body, so we have to track them ourselves. _motion_states = {}, // Don't need to worry about it for cached shapes. _noncached_shapes = {}, // A body with a compound shape always has a regular shape as well, so we // have to track them separately. _compound_shapes = {}, // object reporting REPORT_CHUNKSIZE, // report array is increased in increments of this chunk size WORLDREPORT_ITEMSIZE = 14, // how many float values each reported item needs worldreport, COLLISIONREPORT_ITEMSIZE = 5, // one float for each object id, and a Vec3 contact normal collisionreport, VEHICLEREPORT_ITEMSIZE = 9, // vehicle id, wheel index, 3 for position, 4 for rotation vehiclereport, CONSTRAINTREPORT_ITEMSIZE = 6, // constraint id, offset object, offset, applied impulse constraintreport; var ab = new ArrayBuffer( 1 ); transferableMessage( ab, [ab] ); var SUPPORT_TRANSFERABLE = ( ab.byteLength === 0 ); public_functions.registerMaterial = function( description ) { _materials[ description.id ] = description; }; public_functions.unRegisterMaterial = function( description ) { delete _materials[ description.id ]; }; public_functions.setFixedTimeStep = function( description ) { fixedTimeStep = description; }; public_functions.updateTransform = function( details ) { _object = _objects[details.id]; _object.getMotionState().getWorldTransform( _transform ); if ( details.pos ) { _vec3_1.setX(details.pos.x); _vec3_1.setY(details.pos.y); _vec3_1.setZ(details.pos.z); _transform.setOrigin(_vec3_1); } if ( details.quat ) { _quat.setX(details.quat.x); _quat.setY(details.quat.y); _quat.setZ(details.quat.z); _quat.setW(details.quat.w); _transform.setRotation(_quat); } _object.setWorldTransform( _transform ); _object.activate(); }; public_functions.applyTorque = function ( details ) { _vec3_1.setX(details.torque_x); _vec3_1.setY(details.torque_y); _vec3_1.setZ(details.torque_z); _objects[details.id].applyTorque( _vec3_1 ); _objects[details.id].activate(); }; public_functions.applyCentralForce = function ( details ) { _vec3_1.setX(details.x); _vec3_1.setY(details.y); _vec3_1.setZ(details.z); _objects[details.id].applyCentralForce(_vec3_1); _objects[details.id].activate(); }; public_functions.applyForce = function ( details ) { _vec3_1.setX(details.force_x); _vec3_1.setY(details.force_y); _vec3_1.setZ(details.force_z); _vec3_2.setX(details.x); _vec3_2.setY(details.y); _vec3_2.setZ(details.z); _objects[details.id].applyForce( _vec3_1, _vec3_2 ); _objects[details.id].activate(); }; public_functions.onSimulationResume = function( params ) { last_simulation_time = Date.now(); }; public_functions.setAngularVelocity = function ( details ) {
_vec3_1.setX(details.x); _vec3_1.setY(details.y); _vec3_1.setZ(details.z); _objects[details.id].setAngularVelocity( _vec3_1 ); _objects[details.id].activate(); }; public_functions.setLinearVelocity = function ( details ) { _vec3_1.setX(details.x); _vec3_1.setY(details.y); _vec3_1.setZ(details.z); _objects[details.id].setLinearVelocity( _vec3_1 ); _objects[details.id].activate(); }; public_functions.setAngularFactor = function ( details ) { _vec3_1.setX(details.x); _vec3_1.setY(details.y); _vec3_1.setZ(details.z); _objects[details.id].setAngularFactor( _vec3_1 ); }; public_functions.setLinearFactor = function ( details ) { _vec3_1.setX(details.x); _vec3_1.setY(details.y); _vec3_1.setZ(details.z); _objects[details.id].setLinearFactor( _vec3_1 ); }; public_functions.setDamping = function ( details ) { _objects[details.id].setDamping( details.linear, details.angular ); }; public_functions.setCcdMotionThreshold = function ( details ) { _objects[details.id].setCcdMotionThreshold( details.threshold ); }; public_functions.setCcdSweptSphereRadius = function ( details ) { _objects[details.id].setCcdSweptSphereRadius( details.radius ); }; public_functions.simulate = function simulate( params ) { if ( world ) { params = params || {}; if ( !params.timeStep ) { if ( last_simulation_time ) { params.timeStep = 0; while ( params.timeStep + last_simulation_duration <= fixedTimeStep ) { params.timeStep = ( Date.now() - last_simulation_time ) / 1000; // time since last simulation } } else { params.timeStep = fixedTimeStep; // handle first frame } } else { if ( params.timeStep < fixedTimeStep ) { params.timeStep = fixedTimeStep; } } params.maxSubSteps = params.maxSubSteps || Math.ceil( params.timeStep / fixedTimeStep ); // If maxSubSteps is not defined, keep the simulation fully up to date last_simulation_duration = Date.now(); world.stepSimulation( params.timeStep, params.maxSubSteps, fixedTimeStep ); self.dispatchEvent( new CustomEvent("report", { detail: { worker: self } }) ); last_simulation_duration = ( Date.now() - last_simulation_duration ) / 1000; last_simulation_time = Date.now(); } }; self.onmessage = function( event ) { if ( event.data instanceof Float32Array ) { // transferable object switch ( event.data[0] ) { case MESSAGE_TYPES.WORLDREPORT: worldreport = new Float32Array( event.data ); break; case MESSAGE_TYPES.COLLISIONREPORT: collisionreport = new Float32Array( event.data ); break; case MESSAGE_TYPES.VEHICLEREPORT: vehiclereport = new Float32Array( event.data ); break; case MESSAGE_TYPES.CONSTRAINTREPORT: constraintreport = new Float32Array( event.data ); break; } return; } if ( event.data.cmd && public_functions[event.data.cmd] ) { //if ( event.data.params.id !== undefined && _objects[event.data.params.id] === undefined && event.data.cmd !== 'addObject' && event.data.cmd !== 'registerMaterial' ) return; public_functions[event.data.cmd]( event.data.params ); } }; /* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. 
*/ (function( self ){ let public_functions = self.public_functions; let AMMO; public_functions.init = function( params ) { if ( !self.Ammo ) importScripts( params.ammo ); if (typeof Ammo !== "function"){ AMMO = function(){ return { then : function( fnc ){ fnc( Ammo ); } }; }; } else { AMMO = Ammo; } AMMO().then( function( Ammolib ) { Ammo = Ammolib; _transform = new Ammo.btTransform; _vec3_1 = new Ammo.btVector3( 0, 0, 0 ); _vec3_2 = new Ammo.btVector3( 0, 0, 0 ); _vec3_3 = new Ammo.btVector3( 0, 0, 0 ); _quat = new Ammo.btQuaternion( 0, 0, 0, 0 ); self.dispatchEvent( new CustomEvent("init", { detail: { worker: self } }) ); REPORT_CHUNKSIZE = params.reportsize || 50; if ( SUPPORT_TRANSFERABLE ) { // Transferable messages are supported, take advantage of them with TypedArrays worldreport = new Float32Array(2 + REPORT_CHUNKSIZE * WORLDREPORT_ITEMSIZE); // message id + # of objects to report + chunk size * # of values per object collisionreport = new Float32Array(2 + REPORT_CHUNKSIZE * COLLISIONREPORT_ITEMSIZE); // message id + # of collisions to report + chunk size * # of values per object vehiclereport = new Float32Array(2 + REPORT_CHUNKSIZE * VEHICLEREPORT_ITEMSIZE); // message id + # of vehicles to report + chunk size * # of values per object constraintreport = new Float32Array(2 + REPORT_CHUNKSIZE * CONSTRAINTREPORT_ITEMSIZE); // message id + # of constraints to report + chunk size * # of values per object } else { // Transferable messages are not supported, send data as normal arrays worldreport = []; collisionreport = []; vehiclereport = []; constraintreport = []; } worldreport[0] = MESSAGE_TYPES.WORLDREPORT; collisionreport[0] = MESSAGE_TYPES.COLLISIONREPORT; vehiclereport[0] = MESSAGE_TYPES.VEHICLEREPORT; constraintreport[0] = MESSAGE_TYPES.CONSTRAINTREPORT; if ( params.type === "soft" ) { let collisionConfiguration = new Ammo.btSoftBodyRigidBodyCollisionConfiguration(); world = new Ammo.btSoftRigidDynamicsWorld( new Ammo.btCollisionDispatcher( collisionConfiguration ), new Ammo.btDbvtBroadphase(), new Ammo.btSequentialImpulseConstraintSolver(), collisionConfiguration, new Ammo.btDefaultSoftBodySolver() ); } else { let collisionConfiguration = new Ammo.btDefaultCollisionConfiguration; let broadphase; params.broadphase = params.broadphase || { type: 'dynamic' }; switch ( params.broadphase.type ) { case 'sweepprune': _vec3_1.setX(params.broadphase.aabbmin.x); _vec3_1.setY(params.broadphase.aabbmin.y); _vec3_1.setZ(params.broadphase.aabbmin.z); _vec3_2.setX(params.broadphase.aabbmax.x); _vec3_2.setY(params.broadphase.aabbmax.y); _vec3_2.setZ(params.broadphase.aabbmax.z); broadphase = new Ammo.btAxisSweep3( _vec3_1, _vec3_2 ); break; case 'dynamic': default: broadphase = new Ammo.btDbvtBroadphase(); break; } world = new Ammo.btDiscreteDynamicsWorld( new Ammo.btCollisionDispatcher( collisionConfiguration ), broadphase, new Ammo.btSequentialImpulseConstraintSolver(), collisionConfiguration ); } fixedTimeStep = params.fixedTimeStep; rateLimit = params.rateLimit; transferableMessage({ cmd: 'worldReady' }); }); }; })(self); /* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. 
*/ (function( self ){ let public_functions = self.public_functions; let Ammo; let _vec3_1, _vec3_2; let reportWorld; self.addEventListener("init", function( event ){ Ammo = self.Ammo; _vec3_1 = self._vec3_1 || new Ammo.btVector3(0,0,0); _vec3_2 = self._vec3_2 || new Ammo.btVector3(0,0,0); }); self.addEventListener("report", function( event ){ reportWorld(); }); public_functions.setGravity = function( description ) { console.log( description ); let vecGravity = new Ammo.btVector3( description.x || 0, description.y, description.z || 0 ); world.setGravity( vecGravity ); try { world.getWorldInfo().set_m_gravity( vecGravity ); } catch( e ){ } }; public_functions.addObject = function( description ) { var localInertia, shape, motionState, rbInfo, body; shape = createShape( description ); if (!shape) return; // If there are children then this is a compound shape if ( description.children ) { var compound_shape = new Ammo.btCompoundShape, _child; compound_shape.addChildShape( _transform, shape ); for ( let i = 0; i < description.children.length; i++ ) { _child = description.children[i]; var trans = new Ammo.btTransform; trans.setIdentity(); _vec3_1.setX(_child.position_offset.x); _vec3_1.setY(_child.position_offset.y); _vec3_1.setZ(_child.position_offset.z); trans.setOrigin(_vec3_1); _quat.setX(_child.rotation.x); _quat.setY(_child.rotation.y); _quat.setZ(_child.rotation.z); _quat.setW(_child.rotation.w); trans.setRotation(_quat); shape = createShape( description.children[i] ); compound_shape.addChildShape( trans, shape ); Ammo.destroy(trans); } shape = compound_shape; _compound_shapes[ description.id ] = shape; } if ( description.type === "soft" ){ addSoftShape( description, shape ); } else { _vec3_1.setX(0); _vec3_1.setY(0); _vec3_1.setZ(0); shape.calculateLocalInertia( description.mass, _vec3_1 ); _transform.setIdentity(); _vec3_2.setX( description.position.x ); _vec3_2.setY( description.position.y ); _vec3_2.setZ( description.position.z ); _transform.setOrigin(_vec3_2); _quat.setX(description.rotation.x); _quat.setY(description.rotation.y); _quat.setZ(description.rotation.z); _quat.setW(description.rotation.w); _transform.setRotation(_quat); motionState = new Ammo.btDefaultMotionState( _transform ); // #TODO: btDefaultMotionState supports center of mass offset as second argument - implement rbInfo = new Ammo.btRigidBodyConstructionInfo( description.mass, motionState, shape, _vec3_1 ); if ( description.materialId !== undefined ) { rbInfo.set_m_friction( _materials[ description.materialId ].friction ); rbInfo.set_m_restitution( _materials[ description.materialId ].restitution ); } body = new Ammo.btRigidBody( rbInfo ); Ammo.destroy(rbInfo); if ( typeof description.collision_flags !== 'undefined' ) { body.setCollisionFlags( description.collision_flags ); } world.addRigidBody( body ); body.id = description.id; _objects[ body.id ] = body; _motion_states[ body.id ] = motionState; var ptr = body.a != undefined ? body.a : body.ptr; _objects_ammo[ptr] = body.id; _num_objects++; transferableMessage({ cmd: 'objectReady', params: body.id }); } }; public_functions.removeObject = function( details ) { if ( details.type === "soft" ){ removeSoftShape( details ); } else { world.removeRigidBody( _objects[details.id] ); Ammo.destroy( _objects[details.id] ); Ammo.destroy( _motion_states[details.id] ); if (_compound_shapes[details.id]) Ammo.destroy(_compound_shapes[details.id]); if (_noncached_shapes[details.id]) Ammo.destroy(_noncached_shapes[details.id]); var ptr = _objects[details.id].a != undefined ? 
_objects[details.id].a : _objects[details.id].ptr; delete _objects_ammo[ptr]; delete _objects[details.id]; delete _motion_states[details.id]; if (_compound_shapes[details.id]) delete _compound_shapes[details.id]; if (_noncached_shapes[details.id]) delete _noncached_shapes[details.id]; _num_objects--; } }; reportWorld = function() { var index, object, transform, origin, rotation, offset = 0, i = 0; if ( SUPPORT_TRANSFERABLE ) { if ( worldreport.length < 2 + _num_objects * WORLDREPORT_ITEMSIZE ) { worldreport = new Float32Array( 2 + // message id & # objects in report ( Math.ceil( _num_objects / REPORT_CHUNKSIZE ) * REPORT_CHUNKSIZE ) * WORLDREPORT_ITEMSIZE // # of values needed * item size ); worldreport[0] = MESSAGE_TYPES.WORLDREPORT; } } worldreport[1] = _num_objects; // record how many objects we're reporting on //for ( i = 0; i < worldreport[1]; i++ ) { for ( index in _objects ) { if ( _objects.hasOwnProperty( index ) ) { object = _objects[index]; // #TODO: we can't use center of mass transform when center of mass can change, // but getMotionState().getWorldTransform() screws up on objects that have been moved //object.getMotionState().getWorldTransform( transform ); transform = object.getCenterOfMassTransform(); origin = transform.getOrigin(); rotation = transform.getRotation(); // add values to report offset = 2 + (i++) * WORLDREPORT_ITEMSIZE; worldreport[ offset ] = object.id; worldreport[ offset + 1 ] = origin.x(); worldreport[ offset + 2 ] = origin.y(); worldreport[ offset + 3 ] = origin.z(); worldreport[ offset + 4 ] = rotation.x(); worldreport[ offset + 5 ] = rotation.y(); worldreport[ offset + 6 ] = rotation.z(); worldreport[ offset + 7 ] = rotation.w(); _vector = object.getLinearVelocity(); worldreport[ offset + 8 ] = _vector.x(); worldreport[ offset + 9 ] = _vector.y(); worldreport[ offset + 10 ] = _vector.z(); _vector = object.getAngularVelocity(); worldreport[ offset + 11 ] = _vector.x(); worldreport[ offset + 12 ] = _vector.y(); worldreport[ offset + 13 ] = _vector.z(); } } if ( SUPPORT_TRANSFERABLE ) { transferableMessage( worldreport.buffer, [worldreport.buffer] ); } else { transferableMessage( worldreport ); } }; })(self); /* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. 
*/ (function( self ){ let Ammo; let _vec3_1, _vec3_2; let _object_shapes = self._object_shapes; let public_functions = self.public_functions; self.addEventListener("init", function( event ){ Ammo = self.Ammo; _vec3_1 = self._vec3_1 || new Ammo.btVector3(0,0,0); _vec3_2 = self._vec3_2 || new Ammo.btVector3(0,0,0); }); let setShapeCache = function ( cache_key, shape ) { _object_shapes[ cache_key ] = shape; }; let getShapeFromCache = function ( cache_key ) { if ( _object_shapes[ cache_key ] !== undefined ) { return _object_shapes[ cache_key ]; } return null; }; self.createShape = function( description ) { let cache_key, shape; _transform.setIdentity(); switch ( description.type ) { case 'soft': shape = createSoftShape( description ); break; case 'plane': cache_key = 'plane_' + description.normal.x + '_' + description.normal.y + '_' + description.normal.z; if ( ( shape = getShapeFromCache( cache_key ) ) === null ) { _vec3_1.setX(description.normal.x); _vec3_1.setY(description.normal.y); _vec3_1.setZ(description.normal.z); shape = new Ammo.btStaticPlaneShape(_vec3_1, 0 ); setShapeCache( cache_key, shape ); } break; case 'box': cache_key = 'box_' + description.width + '_' + description.height + '_' + description.depth; if ( ( shape = getShapeFromCache( cache_key ) ) === null ) { _vec3_1.setX(description.width / 2); _vec3_1.setY(description.height / 2); _vec3_1.setZ(description.depth / 2); shape = new Ammo.btBoxShape( _vec3_1 ); setShapeCache( cache_key, shape ); } break; case 'sphere': cache_key = 'sphere_' + description.radius; if ( ( shape = getShapeFromCache( cache_key ) ) === null ) { shape = new Ammo.btSphereShape( description.radius ); setShapeCache( cache_key, shape ); } break; case 'cylinder': cache_key = 'cylinder_' + description.width + '_' + description.height + '_' + description.depth; if ( ( shape = getShapeFromCache( cache_key ) ) === null ) { _vec3_1.setX(description.width / 2); _vec3_1.setY(description.height / 2); _vec3_1.setZ(description.depth / 2); shape = new Ammo.btCylinderShape(_vec3_1); setShapeCache( cache_key, shape ); } break; case 'capsule': cache_key = 'capsule_' + description.radius + '_' + description.height; if ( ( shape = getShapeFromCache( cache_key ) ) === null ) { // In Bullet, capsule height excludes the end spheres shape = new Ammo.btCapsuleShape( description.radius, description.height - 2 * description.radius ); setShapeCache( cache_key, shape ); } break; case 'cone': cache_key = 'cone_' + description.radius + '_' + description.height; if ( ( shape = getShapeFromCache( cache_key ) ) === null ) { shape = new Ammo.btConeShape( description.radius, description.height ); setShapeCache( cache_key, shape ); } break; case 'concave': var i, triangle, triangle_mesh = new Ammo.btTriangleMesh; if (!description.triangles.length) return false; for ( i = 0; i < description.triangles.length; i++ ) { triangle = description.triangles[i]; _vec3_1.setX(triangle[0].x); _vec3_1.setY(triangle[0].y); _vec3_1.setZ(triangle[0].z); _vec3_2.setX(triangle[1].x); _vec3_2.setY(triangle[1].y); _vec3_2.setZ(triangle[1].z); _vec3_3.setX(triangle[2].x); _vec3_3.setY(triangle[2].y); _vec3_3.setZ(triangle[2].z); triangle_mesh.addTriangle( _vec3_1, _vec3_2, _vec3_3, true ); } shape = new Ammo.btBvhTriangleMeshShape( triangle_mesh, true, true ); _noncached_shapes[description.id] = shape; break; case 'convex': let point; shape = new Ammo.btConvexHullShape; for ( let i = 0; i < description.points.length; i++ ) { point = description.points[i]; _vec3_1.setX(point.x); _vec3_1.setY(point.y); 
_vec3_1.setZ(point.z); shape.addPoint(_vec3_1); } _noncached_shapes[description.id] = shape; break; case 'heightfield': var ptr = Ammo.allocate(4 * description.xpts * description.ypts, "float", Ammo.ALLOC_NORMAL); for (var f = 0; f < description.points.length; f++) { Ammo.setValue(ptr + f * 4, description.points[f], 'float'); } shape = new Ammo.btHeightfieldTerrainShape( description.xpts, description.ypts, ptr, 1, -description.absMaxHeight, description.absMaxHeight, 2, 0, false ); _vec3_1.setX(description.xsize/(description.xpts - 1)); _vec3_1.setY(description.ysize/(description.ypts - 1)); _vec3_1.setZ(1); shape.setLocalScaling(_vec3_1); _noncached_shapes[description.id] = shape; break; default: // Not recognized return; } return shape; }; public_functions.updateMass = function( details ) { // #TODO: changing a static object into dynamic is buggy _object = _objects[details.id]; // Per http://www.bulletphysics.org/Bullet/phpBB3/viewtopic.php?p=&f=9&t=3663#p13816 world.removeRigidBody( _object ); _vec3_1.setX(0); _vec3_1.setY(0); _vec3_1.setZ(0); _object.setMassProps( details.mass, _vec3_1 ); world.addRigidBody( _object ); _object.activate(); }; public_functions.applyCentralImpulse = function ( details ) { _vec3_1.setX(details.x); _vec3_1.setY(details.y); _vec3_1.setZ(details.z); _objects[details.id].applyCentralImpulse(_vec3_1); _objects[details.id].activate(); }; public_functions.applyImpulse = function ( details ) { _vec3_1.setX(details.impulse_x); _vec3_1.setY(details.impulse_y); _vec3_1.setZ(details.impulse_z); _vec3_2.setX(details.x); _vec3_2.setY(details.y); _vec3_2.setZ(details.z); _objects[details.id].applyImpulse( _vec3_1, _vec3_2 ); _objects[details.id].activate(); }; //reportCollisions self.addEventListener("report", function() { var i, offset, dp = world.getDispatcher(), num = dp.getNumManifolds(), manifold, num_contacts, j, pt, _collided = false; if ( SUPPORT_TRANSFERABLE ) { if ( collisionreport.length < 2 + num * COLLISIONREPORT_ITEMSIZE ) { collisionreport = new Float32Array( 2 + // message id & # objects in report ( Math.ceil( _num_objects / REPORT_CHUNKSIZE ) * REPORT_CHUNKSIZE ) * COLLISIONREPORT_ITEMSIZE // # of values needed * item size ); collisionreport[0] = MESSAGE_TYPES.COLLISIONREPORT; } } collisionreport[1] = 0; // how many collisions we're reporting on for ( i = 0; i < num; i++ ) { manifold = dp.getManifoldByIndexInternal( i ); num_contacts = manifold.getNumContacts(); if ( num_contacts === 0 ) { continue; } for ( j = 0; j < num_contacts; j++ ) { pt = manifold.getContactPoint( j ); //if ( pt.getDistance() < 0 ) { offset = 2 + (collisionreport[1]++) * COLLISIONREPORT_ITEMSIZE; collisionreport[ offset ] = _objects_ammo[ manifold.getBody0() ]; collisionreport[ offset + 1 ] = _objects_ammo[ manifold.getBody1() ]; _vector = pt.get_m_normalWorldOnB(); collisionreport[ offset + 2 ] = _vector.x(); collisionreport[ offset + 3 ] = _vector.y(); collisionreport[ offset + 4 ] = _vector.z(); break; //} } } if ( SUPPORT_TRANSFERABLE ) { transferableMessage( collisionreport.buffer, [collisionreport.buffer] ); } else { transferableMessage( collisionreport ); } }); })(self); /* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor.
*/ (function( self ){ let Ammo; let softBodyHelpers; let margin = 0.05; let _bodies = {}; let _descriptions = {}; let report = []; MESSAGE_TYPES.SOFTREPORT = 4; self.addEventListener("init", function( event ){ Ammo = self.Ammo; if ( typeof Ammo.btSoftBodyHelpers === "function" ) softBodyHelpers = new Ammo.btSoftBodyHelpers(); }); self.createSoftShape = function( description ){ let volumeSoftBody = softBodyHelpers.CreateFromTriMesh( world.getWorldInfo(), description.ammoVertices, description.ammoIndices, description.ammoIndices.length / 3, true ); let sbConfig = volumeSoftBody.get_m_cfg(); sbConfig.set_viterations( 40 ); sbConfig.set_piterations( 40 ); // Soft-soft and soft-rigid collisions sbConfig.set_collisions( 0x11 ); // Friction sbConfig.set_kDF( 0.1 ); // Damping sbConfig.set_kDP( 0.01 ); // Pressure sbConfig.set_kPR( description.pressure ); // Stiffness volumeSoftBody.get_m_materials().at( 0 ).set_m_kLST( 0.9 ); volumeSoftBody.get_m_materials().at( 0 ).set_m_kAST( 0.9 ); volumeSoftBody.setTotalMass( description.mass, false ); Ammo.castObject( volumeSoftBody, Ammo.btCollisionObject ).getCollisionShape().setMargin( margin ); volumeSoftBody.id = description.id; _descriptions[description.id] = { ammoIndexAssociation : description.ammoIndexAssociation }; return volumeSoftBody; }; self.addSoftShape = function( description, shape ){ _bodies[description.id] = shape; world.addSoftBody( shape, 1, - 1 ); shape.setActivationState( 4 ); }; self.removeSoftShape = function( details ){ //ToDo delete _descriptions[details.id]; }; self.addEventListener("report", function() { let keys = Object.keys( _bodies ); if(keys.length < 1) return; report = []; report[0] = MESSAGE_TYPES.SOFTREPORT; report[1] = keys.length; let offset = 2; let softBody; // Update soft volumes for ( let i = 0, il = keys.length; i < il; i ++ ) { softBody = _bodies[ keys[i] ]; report[ offset ] = keys[i]; offset += 1; var association = _descriptions[ softBody.id ].ammoIndexAssociation; var numVerts = association.length; var nodes = softBody.get_m_nodes(); report[ offset ] = numVerts; for ( var j = 0; j < numVerts; j ++ ) { var node = nodes.at( j ); var nodePos = node.get_m_x(); report[offset+1] = nodePos.x(); report[offset+2] = nodePos.y(); report[offset+3] = nodePos.z(); var nodeNormal = node.get_m_n(); report[offset+4] = nodeNormal.x(); report[offset+5] = nodeNormal.y(); report[offset+6] = nodeNormal.z(); offset += 6; } offset += 1; } transferableMessage( report ); }); })( self ); /* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. 
*/ (function( self ){ let public_functions = self.public_functions; let reportVehicles; let _vehicles = {}; let _num_wheels = 0; let _objects = self._objects; let _vec3_1, _vec3_2, _vec3_3; self.addEventListener("init", function( event ){ _vec3_1 = self._vec3_1 || new Ammo.btVector3(0,0,0); _vec3_2 = self._vec3_2 || new Ammo.btVector3(0,0,0); _vec3_3 = self._vec3_3 || new Ammo.btVector3(0,0,0); }); self.addEventListener("report", function( event ){ reportVehicles(); }); public_functions.addVehicle = function( description ) { var vehicle_tuning = new Ammo.btVehicleTuning(), vehicle; vehicle_tuning.set_m_suspensionStiffness( description.suspension_stiffness ); vehicle_tuning.set_m_suspensionCompression( description.suspension_compression ); vehicle_tuning.set_m_suspensionDamping( description.suspension_damping ); vehicle_tuning.set_m_maxSuspensionTravelCm( description.max_suspension_travel ); vehicle_tuning.set_m_maxSuspensionForce( description.max_suspension_force ); vehicle = new Ammo.btRaycastVehicle( vehicle_tuning, _objects[ description.rigidBody ], new Ammo.btDefaultVehicleRaycaster( world ) ); vehicle.tuning = vehicle_tuning; _objects[ description.rigidBody ].setActivationState( 4 ); vehicle.setCoordinateSystem( 0, 1, 2 ); world.addVehicle( vehicle ); _vehicles[ description.id ] = vehicle; }; public_functions.removeVehicle = function( description ) { delete _vehicles[ description.id ]; }; public_functions.addWheel = function( description ) { if ( _vehicles[description.id] !== undefined ) { var tuning = _vehicles[description.id].tuning; if ( description.tuning !== undefined ) { tuning = new Ammo.btVehicleTuning(); tuning.set_m_suspensionStiffness( description.tuning.suspension_stiffness ); tuning.set_m_suspensionCompression( description.tuning.suspension_compression ); tuning.set_m_suspensionDamping( description.tuning.suspension_damping ); tuning.set_m_maxSuspensionTravelCm( description.tuning.max_suspension_travel ); tuning.set_m_maxSuspensionForce( description.tuning.max_suspension_force ); } _vec3_1.setX(description.connection_point.x); _vec3_1.setY(description.connection_point.y); _vec3_1.setZ(description.connection_point.z); _vec3_2.setX(description.wheel_direction.x); _vec3_2.setY(description.wheel_direction.y); _vec3_2.setZ(description.wheel_direction.z); _vec3_3.setX(description.wheel_axle.x); _vec3_3.setY(description.wheel_axle.y); _vec3_3.setZ(description.wheel_axle.z); _vehicles[description.id].addWheel( _vec3_1, _vec3_2, _vec3_3, description.suspension_rest_length, description.wheel_radius, tuning, description.is_front_wheel ); } _num_wheels++; if ( SUPPORT_TRANSFERABLE ) { vehiclereport = new Float32Array(1 + _num_wheels * VEHICLEREPORT_ITEMSIZE); // message id & ( # of objects to report * # of values per object ) vehiclereport[0] = MESSAGE_TYPES.VEHICLEREPORT; } else { vehiclereport = [ MESSAGE_TYPES.VEHICLEREPORT ]; } }; public_functions.setSteering = function( details ) { if ( _vehicles[details.id] !== undefined ) { _vehicles[details.id].setSteeringValue( details.steering, details.wheel ); } }; public_functions.setBrake = function( details ) { if ( _vehicles[details.id] !== undefined ) { _vehicles[details.id].setBrake( details.brake, details.wheel ); } }; public_functions.applyEngineForce = function( details ) { if ( _vehicles[details.id] !== undefined ) { _vehicles[details.id].applyEngineForce( details.force, details.wheel ); } }; reportVehicles = function() { var index, vehicle, transform, origin, rotation, offset = 0, i = 0, j = 0; if ( SUPPORT_TRANSFERABLE ) { if ( 
vehiclereport.length < 2 + _num_wheels * VEHICLEREPORT_ITEMSIZE ) { vehiclereport = new Float32Array( 2 + // message id & # objects in report ( Math.ceil( _num_wheels / REPORT_CHUNKSIZE ) * REPORT_CHUNKSIZE ) * VEHICLEREPORT_ITEMSIZE // # of values needed * item size ); vehiclereport[0] = MESSAGE_TYPES.VEHICLEREPORT; } } for ( index in _vehicles ) { if ( _vehicles.hasOwnProperty( index ) ) { vehicle = _vehicles[index]; for ( j = 0; j < vehicle.getNumWheels(); j++ ) { //vehicle.updateWheelTransform( j, true ); //transform = vehicle.getWheelTransformWS( j ); transform = vehicle.getWheelInfo( j ).get_m_worldTransform(); origin = transform.getOrigin(); rotation = transform.getRotation(); // add values to report offset = 1 + (i++) * VEHICLEREPORT_ITEMSIZE; vehiclereport[ offset ] = index; vehiclereport[ offset + 1 ] = j; vehiclereport[ offset + 2 ] = origin.x(); vehiclereport[ offset + 3 ] = origin.y(); vehiclereport[ offset + 4 ] = origin.z(); vehiclereport[ offset + 5 ] = rotation.x(); vehiclereport[ offset + 6 ] = rotation.y(); vehiclereport[ offset + 7 ] = rotation.z(); vehiclereport[ offset + 8 ] = rotation.w(); } } } if ( j !== 0 ) { if ( SUPPORT_TRANSFERABLE ) { transferableMessage( vehiclereport.buffer, [vehiclereport.buffer] ); } else { transferableMessage( vehiclereport ); } } }; })(self); /* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. */ 'use strict'; (function( self ){ self._constraints = {}; self._num_constraints = 0; public_functions.addConstraint = function ( details ) { var constraint; if ( typeof knownConstraints[details.type] === "function" ) { constraint = knownConstraints[details.type]( details ); } else { return; } world.addConstraint( constraint ); constraint.enableFeedback(); _constraints[ details.id ] = constraint; _num_constraints++; if ( SUPPORT_TRANSFERABLE ) { constraintreport = new Float32Array(1 + _num_constraints * CONSTRAINTREPORT_ITEMSIZE); // message id & ( # of objects to report * # of values per object ) constraintreport[0] = MESSAGE_TYPES.CONSTRAINTREPORT; } else { constraintreport = [ MESSAGE_TYPES.CONSTRAINTREPORT ]; } }; public_functions.removeConstraint = function( details ) { var constraint = _constraints[ details.id ]; if ( constraint !== undefined ) { world.removeConstraint( constraint ); delete _constraints[ details.id ]; _num_constraints--; } }; public_functions.constraint_setBreakingImpulseThreshold = function( details ) { var constraint = _constraints[ details.id ]; if ( constraint !== undefined ) { constraint.setBreakingImpulseThreshold( details.threshold ); } }; self.addEventListener("report", function( event ){ var index, constraint, offset_body, transform, origin, offset = 0, i = 0; if ( SUPPORT_TRANSFERABLE ) { if ( constraintreport.length < 2 + _num_constraints * CONSTRAINTREPORT_ITEMSIZE ) { constraintreport = new Float32Array( 2 + // message id & # objects in report ( Math.ceil( _num_constraints / REPORT_CHUNKSIZE ) * REPORT_CHUNKSIZE ) * CONSTRAINTREPORT_ITEMSIZE // # of values needed * item size ); constraintreport[0] = MESSAGE_TYPES.CONSTRAINTREPORT; } } for ( index in _constraints ) { if ( _constraints.hasOwnProperty( index ) ) { constraint = _constraints[index]; offset_body = constraint.getRigidBodyA(); transform = constraint.getFrameOffsetA(); origin = transform.getOrigin(); // add values to report offset = 1 + (i++) * CONSTRAINTREPORT_ITEMSIZE; constraintreport[ offset ] = index; constraintreport[ offset
+ 1 ] = offset_body.id; constraintreport[ offset + 2 ] = origin.getX(); constraintreport[ offset + 3 ] = origin.getY(); constraintreport[ offset + 4 ] = origin.getZ(); constraintreport[ offset + 5 ] = constraint.getAppliedImpulse(); } } if ( i !== 0 ) { if ( SUPPORT_TRANSFERABLE ) { transferableMessage( constraintreport.buffer, [constraintreport.buffer] ); } else { transferableMessage( constraintreport ); } } }); })(self); /* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. */ 'use strict'; (function( self ){ let public_functions = self.public_functions; let _constraints = self._constraints || {}; let _objects = self._objects; let Ammo; let _vec3_1; let _vec3_2; self.addEventListener("init", function( event ){ Ammo = self.Ammo; _vec3_1 = self._vec3_1 || new Ammo.btVector3(0,0,0); _vec3_2 = self._vec3_2 || new Ammo.btVector3(0,0,0); }); self.knownConstraints.dof = function( details ){ let transforma = new Ammo.btTransform(); transforma.setIdentity(); _vec3_1.setX(details.positiona.x); _vec3_1.setY(details.positiona.y); _vec3_1.setZ(details.positiona.z); transforma.setOrigin(_vec3_1 ); let rotation = transforma.getRotation(); rotation.setEulerZYX( -details.axisa.z, -details.axisa.y, -details.axisa.x ); transforma.setRotation( rotation ); let constraint; if ( details.objectb ) { let transformb = new Ammo.btTransform(); transformb.setIdentity(); _vec3_2.setX(details.positionb.x); _vec3_2.setY(details.positionb.y); _vec3_2.setZ(details.positionb.z); transformb.setOrigin(_vec3_2); rotation = transformb.getRotation(); rotation.setEulerZYX( -details.axisb.z, -details.axisb.y, -details.axisb.x ); transformb.setRotation( rotation ); constraint = new Ammo.btGeneric6DofConstraint( _objects[ details.objecta ], _objects[ details.objectb ], transforma, transformb ); Ammo.destroy( transformb ); } else { constraint = new Ammo.btGeneric6DofConstraint( _objects[ details.objecta ],
} Ammo.destroy( transforma ); return constraint; }; public_functions.dof_setLinearLowerLimit = function( params ) { let constraint = _constraints[ params.constraint ]; _vec3_1.setX(params.x); _vec3_1.setY(params.y); _vec3_1.setZ(params.z); constraint.setLinearLowerLimit(_vec3_1); activate(constraint); }; public_functions.dof_setLinearUpperLimit = function( params ) { let constraint = _constraints[ params.constraint ]; _vec3_1.setX(params.x); _vec3_1.setY(params.y); _vec3_1.setZ(params.z); constraint.setLinearUpperLimit(_vec3_1); activate(constraint); }; public_functions.dof_setAngularLowerLimit = function( params ) { let constraint = _constraints[ params.constraint ]; _vec3_1.setX(params.x); _vec3_1.setY(params.y); _vec3_1.setZ(params.z); constraint.setAngularLowerLimit(_vec3_1); activate(constraint); }; public_functions.dof_setAngularUpperLimit = function( params ) { let constraint = _constraints[ params.constraint ]; _vec3_1.setX(params.x); _vec3_1.setY(params.y); _vec3_1.setZ(params.z); constraint.setAngularUpperLimit(_vec3_1); activate(constraint); }; public_functions.dof_enableAngularMotor = function( params ) { let constraint = _constraints[ params.constraint ]; let motor = constraint.getRotationalLimitMotor( params.which ); motor.set_m_enableMotor( true ); activate(constraint); }; public_functions.dof_configureAngularMotor = function( params ) { let constraint = _constraints[ params.constraint ]; let motor = constraint.getRotationalLimitMotor( params.which ); motor.set_m_loLimit( params.low_angle ); motor.set_m_hiLimit( params.high_angle ); motor.set_m_targetVelocity( params.velocity ); motor.set_m_maxMotorForce( params.max_force ); activate(constraint); }; public_functions.dof_disableAngularMotor = function( params ) { var constraint = _constraints[ params.constraint ]; var motor = constraint.getRotationalLimitMotor( params.which ); motor.set_m_enableMotor( false ); activate(constraint); }; let activate = function( constraint ){ constraint.getRigidBodyA().activate(); if ( constraint.getRigidBodyB() ) { constraint.getRigidBodyB().activate(); } }; })( self ); /* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. 
*/ 'use strict'; (function( self ){ let public_functions = self.public_functions; let _constraints = self._constraints || {}; let Ammo, _vec3_1, _vec3_2, _vec3_3; self.addEventListener("init", function( event ){ Ammo = self.Ammo; _vec3_1 = self._vec3_1 || new Ammo.btVector3(0,0,0); _vec3_2 = self._vec3_2 || new Ammo.btVector3(0,0,0); _vec3_3 = self._vec3_3 || new Ammo.btVector3(0,0,0); }); self.knownConstraints.hinge = function( details ) { let constraint; if ( details.objectb === undefined ) { _vec3_1.setX(details.positiona.x); _vec3_1.setY(details.positiona.y); _vec3_1.setZ(details.positiona.z); _vec3_2.setX(details.axis.x); _vec3_2.setY(details.axis.y); _vec3_2.setZ(details.axis.z); constraint = new Ammo.btHingeConstraint( _objects[ details.objecta ], _vec3_1, _vec3_2 ); } else { _vec3_1.setX(details.positiona.x); _vec3_1.setY(details.positiona.y); _vec3_1.setZ(details.positiona.z); _vec3_2.setX(details.positionb.x); _vec3_2.setY(details.positionb.y); _vec3_2.setZ(details.positionb.z); _vec3_3.setX(details.axis.x); _vec3_3.setY(details.axis.y); _vec3_3.setZ(details.axis.z); constraint = new Ammo.btHingeConstraint( _objects[ details.objecta ], _objects[ details.objectb ], _vec3_1, _vec3_2, _vec3_3, _vec3_3 ); } return constraint; }; public_functions.hinge_setLimits = function( params ) { _constraints[ params.constraint ].setLimit( params.low, params.high, 0, params.bias_factor, params.relaxation_factor ); }; public_functions.hinge_enableAngularMotor = function( params ) { var constraint = _constraints[ params.constraint ]; constraint.enableAngularMotor( true, params.velocity, params.acceleration ); constraint.getRigidBodyA().activate(); if ( constraint.getRigidBodyB() ) { constraint.getRigidBodyB().activate(); } }; public_functions.hinge_disableMotor = function( params ) { var constraint = _constraints[ params.constraint ]; _constraints[ params.constraint ].enableMotor( false ); if ( constraint.getRigidBodyB() ) { constraint.getRigidBodyB().activate(); } }; })( self ); /* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. 
*/ 'use strict'; (function( self ){ let public_functions = self.public_functions; let _constraints = self._constraints || {}; let Ammo, _vec3_1, _vec3_2; self.addEventListener("init", function( event ){ Ammo = self.Ammo; _vec3_1 = self._vec3_1 || new Ammo.btVector3(0,0,0); _vec3_2 = self._vec3_2 || new Ammo.btVector3(0,0,0); }); self.knownConstraints.slider = function( details ){ let constraint; let transformb; let transforma = new Ammo.btTransform(); _vec3_1.setX( details.positiona.x ); _vec3_1.setY( details.positiona.y ); _vec3_1.setZ( details.positiona.z ); transforma.setOrigin(_vec3_1); let rotation = transforma.getRotation(); rotation.setEuler( details.axis.x, details.axis.y, details.axis.z ); transforma.setRotation( rotation ); if ( details.objectb ) { transformb = new Ammo.btTransform(); _vec3_2.setX(details.positionb.x); _vec3_2.setY(details.positionb.y); _vec3_2.setZ(details.positionb.z); transformb.setOrigin(_vec3_2); rotation = transformb.getRotation(); rotation.setEuler( details.axis.x, details.axis.y, details.axis.z ); transformb.setRotation( rotation ); constraint = new Ammo.btSliderConstraint( _objects[ details.objecta ], _objects[ details.objectb ], transforma, transformb, true ); } else { constraint = new Ammo.btSliderConstraint( _objects[ details.objecta ], transforma, true ); } Ammo.destroy(transforma); if (transformb != undefined) { Ammo.destroy(transformb); } return constraint; }; public_functions.slider_setLimits = function( params ) { var constraint = _constraints[ params.constraint ]; constraint.setLowerLinLimit( params.lin_lower || 0 ); constraint.setUpperLinLimit( params.lin_upper || 0 ); constraint.setLowerAngLimit( params.ang_lower || 0 ); constraint.setUpperAngLimit( params.ang_upper || 0 ); }; public_functions.slider_setRestitution = function( params ) { var constraint = _constraints[ params.constraint ]; constraint.setSoftnessLimLin( params.linear || 0 ); constraint.setSoftnessLimAng( params.angular || 0 ); }; public_functions.slider_enableLinearMotor = function( params ) { var constraint = _constraints[ params.constraint ]; constraint.setTargetLinMotorVelocity( params.velocity ); constraint.setMaxLinMotorForce( params.acceleration ); constraint.setPoweredLinMotor( true ); constraint.getRigidBodyA().activate(); if ( constraint.getRigidBodyB() ) { constraint.getRigidBodyB().activate(); } }; public_functions.slider_disableLinearMotor = function( params ) { var constraint = _constraints[ params.constraint ]; constraint.setPoweredLinMotor( false ); if ( constraint.getRigidBodyB() ) { constraint.getRigidBodyB().activate(); } }; public_functions.slider_enableAngularMotor = function( params ) { var constraint = _constraints[ params.constraint ]; constraint.setTargetAngMotorVelocity( params.velocity ); constraint.setMaxAngMotorForce( params.acceleration ); constraint.setPoweredAngMotor( true ); constraint.getRigidBodyA().activate(); if ( constraint.getRigidBodyB() ) { constraint.getRigidBodyB().activate(); } }; public_functions.slider_disableAngularMotor = function( params ) { var constraint = _constraints[ params.constraint ]; constraint.setPoweredAngMotor( false ); constraint.getRigidBodyA().activate(); if ( constraint.getRigidBodyB() ) { constraint.getRigidBodyB().activate(); } }; })(self); /* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. 
*/ 'use strict'; (function( self ){ let public_functions = self.public_functions; let _constraints = self._constraints || {}; let Ammo; let _vec3_1, _vec3_2; self.addEventListener("init", function( event ){ Ammo = self.Ammo; _vec3_1 = self._vec3_1 || new Ammo.btVector3(0,0,0); _vec3_2 = self._vec3_2 || new Ammo.btVector3(0,0,0); }); self.knownConstraints.conetwist = function( details ){ var transforma, transformb; let constraint; transforma = new Ammo.btTransform(); transforma.setIdentity(); transformb = new Ammo.btTransform(); transformb.setIdentity(); _vec3_1.setX(details.positiona.x); _vec3_1.setY(details.positiona.y); _vec3_1.setZ(details.positiona.z); _vec3_2.setX(details.positionb.x); _vec3_2.setY(details.positionb.y); _vec3_2.setZ(details.positionb.z); transforma.setOrigin(_vec3_1); transformb.setOrigin(_vec3_2); var rotation = transforma.getRotation(); rotation.setEulerZYX( -details.axisa.z, -details.axisa.y, -details.axisa.x ); transforma.setRotation( rotation ); rotation = transformb.getRotation(); rotation.setEulerZYX( -details.axisb.z, -details.axisb.y, -details.axisb.x ); transformb.setRotation( rotation ); constraint = new Ammo.btConeTwistConstraint( _objects[ details.objecta ], _objects[ details.objectb ], transforma, transformb ); constraint.setLimit( Math.PI, 0, Math.PI ); Ammo.destroy(transforma); Ammo.destroy(transformb); return constraint; }; public_functions.conetwist_setLimit = function( params ) { _constraints[ params.constraint ].setLimit( params.z, params.y, params.x ); // ZYX order }; public_functions.conetwist_enableMotor = function( params ) { var constraint = _constraints[ params.constraint ]; constraint.enableMotor( true ); constraint.getRigidBodyA().activate(); constraint.getRigidBodyB().activate(); }; public_functions.conetwist_setMaxMotorImpulse = function( params ) { var constraint = _constraints[ params.constraint ]; constraint.setMaxMotorImpulse( params.max_impulse ); constraint.getRigidBodyA().activate(); constraint.getRigidBodyB().activate(); }; public_functions.conetwist_setMotorTarget = function( params ) { var constraint = _constraints[ params.constraint ]; _quat.setX(params.x); _quat.setY(params.y); _quat.setZ(params.z); _quat.setW(params.w); constraint.setMotorTarget(_quat); constraint.getRigidBodyA().activate(); constraint.getRigidBodyB().activate(); }; public_functions.conetwist_disableMotor = function( params ) { var constraint = _constraints[ params.constraint ]; constraint.enableMotor( false ); constraint.getRigidBodyA().activate(); constraint.getRigidBodyB().activate(); }; })(self); /* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. 
*/
'use strict';

(function( self ){

	let Ammo;
	let _vec3_1, _vec3_2;

	self.addEventListener("init", function( event ){
		Ammo = self.Ammo;
		_vec3_1 = self._vec3_1 || new Ammo.btVector3(0,0,0);
		_vec3_2 = self._vec3_2 || new Ammo.btVector3(0,0,0);
	});

	self.knownConstraints.point = function( details ){
		let constraint;

		// The pivot in body A's local space is needed in both branches.
		_vec3_1.setX(details.positiona.x);
		_vec3_1.setY(details.positiona.y);
		_vec3_1.setZ(details.positiona.z);

		if ( details.objectb === undefined ) {
			constraint = new Ammo.btPoint2PointConstraint(
				_objects[ details.objecta ],
				_vec3_1
			);
		} else {
			_vec3_2.setX(details.positionb.x);
			_vec3_2.setY(details.positionb.y);
			_vec3_2.setZ(details.positionb.z);

			constraint = new Ammo.btPoint2PointConstraint(
				_objects[ details.objecta ],
				_objects[ details.objectb ],
				_vec3_1,
				_vec3_2
			);
		}

		return constraint;
	};

})( self );
transforma );
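A minimal usage sketch for the constraint modules above, assuming the worker's shared `_objects` body registry is populated and that created constraints are stored in `_constraints` under numeric ids; the ids below are illustrative, not from the source:

// Hypothetical: body 1 is registered in _objects; the worker stored the
// resulting constraint in _constraints under id 3.
let slider = self.knownConstraints.slider({
	objecta: 1,
	positiona: { x: 0, y: 0, z: 0 }, // pivot in the body's local space
	axis: { x: 0, y: 0, z: 0 }       // no rotation: slide along the default axis
});

// Clamp travel to +/-5 units and lock rotation around the slider axis.
self.public_functions.slider_setLimits({
	constraint: 3,
	lin_lower: -5,
	lin_upper: 5,
	ang_lower: 0,
	ang_upper: 0
});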
cmd.go
// Package run is the main package for the apate cli package run import ( "bufio" "context" "fmt" "io/ioutil" "log" "os" "path/filepath" "time" cpApi "github.com/atlarge-research/apate/api/controlplane" "github.com/fatih/color" "github.com/pkg/errors" "github.com/urfave/cli/v2" "github.com/atlarge-research/apate/internal/service" "github.com/atlarge-research/apate/pkg/clients/controlplane" "github.com/atlarge-research/apate/pkg/container" "github.com/atlarge-research/apate/pkg/env" ) type commandLineArgs struct { kubeConfigFileLocation string controlPlaneAddress string controlPlanePort int controlPlaneTimeout int scenarioDisableWatchers bool apateletRunType string pullPolicyControlPlane string pullPolicyCreate string } const ( defaultControlPlaneAddress = "localhost" defaultControlPlanePort = 8085 defaultControlPlaneTimeout = 300 ) func panicf(err error) { log.Panicf("An error occurred while running the CLI: %+v\n", err) } // StartCmd is the cmd entrypoint func StartCmd(cmdArgs []string) { args := &commandLineArgs{} ctx := context.Background() cpEnv := env.DefaultControlPlaneEnvironment() app := &cli.App{ Name: "apate-cli", Usage: "Control the Apate control plane.", Commands: []*cli.Command{ { Name: "run", Usage: "Runs a given scenario file on the Apate cluster", Action: func(c *cli.Context) error { return errors.Wrap(runScenario(ctx, args), "failed to run scenario") }, Flags: []cli.Flag{ &cli.StringFlag{ Name: "address", Usage: "The address of the control plane", Destination: &args.controlPlaneAddress, Value: defaultControlPlaneAddress, Required: false, }, &cli.IntFlag{ Name: "port", Usage: "The port of the control plane", Destination: &args.controlPlanePort, Value: defaultControlPlanePort, Required: false, }, &cli.BoolFlag{ Name: "disable-watchers", Usage: "Whether to disable watchers before starting the scenario. This will reduce resource usage. Enabling this option will require a restart of the control plane and Apatelets before the informers will work again", Required: false, Value: false, Destination: &args.scenarioDisableWatchers, }, }, }, { Name: "create", Usage: "Creates a local control plane", Action: func(c *cli.Context) error { return errors.Wrap(createControlPlane(ctx, cpEnv, args), "failed to create control plane") }, Flags: []cli.Flag{ &cli.StringFlag{ Name: "address", Usage: "Listen address of control plane", Destination: &cpEnv.ListenAddress, Value: cpEnv.ListenAddress, Required: false, }, &cli.IntFlag{ Name: "port", Usage: "The port of the control plane", Destination: &cpEnv.ListenPort, Value: cpEnv.ListenPort, Required: false, }, &cli.StringFlag{ Name: "manager-location", Usage: "Manager config of cluster manager", TakesFile: true, Destination: &cpEnv.ManagerConfigLocation, Value: cpEnv.ManagerConfigLocation, Required: false, }, &cli.StringFlag{ Name: "kubeconfig-location", Usage: "Location of the kubeconfig. 
If set, the managed cluster will be disabled", TakesFile: true, Value: args.kubeConfigFileLocation, Destination: &args.kubeConfigFileLocation, Required: false, }, &cli.StringFlag{ Name: "external-ip", Usage: "IP used by apatelets to connect to control plane", Destination: &cpEnv.ExternalIP, Value: cpEnv.ExternalIP, Required: false, }, &cli.StringFlag{ Name: "docker-policy-cp", Usage: "Docker pull policy for control plane", Destination: &args.pullPolicyControlPlane, Value: string(cpEnv.DockerPolicy), Required: false, }, &cli.StringFlag{ Name: "docker-policy", Usage: "Docker pull policy used for creating the control plane", Destination: &args.pullPolicyCreate, Value: string(env.DefaultPullPolicy), Required: false, }, &cli.IntFlag{ Name: "timeout", Usage: "Time before giving up on the control plane in seconds", Destination: &args.controlPlaneTimeout, Value: defaultControlPlaneTimeout, Required: false, }, &cli.StringFlag{ Name: "runtype", Usage: "How the control plane runs new apatelets. Can be DOCKER or ROUTINE.", Destination: &args.apateletRunType, Value: string(cpEnv.ApateletRunType), Required: false, }, &cli.BoolFlag{ Name: "prometheus-enabled", Usage: "If the control plane start a Prometheus stack. Can be TRUE or FALSE.", Destination: &cpEnv.PrometheusEnabled, Value: cpEnv.PrometheusEnabled, Required: false, }, }, }, { Name: "kubeconfig", Usage: "Retrieves a kube configuration file from the control plane", Action: func(c *cli.Context) error { return errors.Wrap(printKubeConfig(ctx, args), "failed to get Kubeconfig") }, Flags: []cli.Flag{ &cli.StringFlag{ Name: "address", Usage: "The address of the control plane", Destination: &args.controlPlaneAddress, Value: defaultControlPlaneAddress, Required: false, }, &cli.IntFlag{ Name: "port", Usage: "The port of the control plane", Destination: &args.controlPlanePort, Value: defaultControlPlanePort, Required: false, }, }, }, }, } err := app.Run(cmdArgs) if err != nil
} func printKubeConfig(ctx context.Context, args *commandLineArgs) error { client, err := controlplane.GetClusterOperationClient(service.NewConnectionInfo(args.controlPlaneAddress, args.controlPlanePort)) if err != nil { return errors.Wrap(err, "couldn't get cluster operation client for kube config") } cfg, err := client.GetKubeConfig(ctx) if err != nil { return errors.Wrap(err, "couldn't get kube config from control plane") } fmt.Println(string(cfg)) if err := client.Conn.Close(); err != nil { return errors.Wrap(err, "error closing connection to cluster operation client") } return nil } func createControlPlane(ctx context.Context, cpEnv env.ControlPlaneEnvironment, args *commandLineArgs) error { fmt.Print("Creating control plane container ") pp := env.PullPolicy(args.pullPolicyCreate) if !pp.Valid() { return errors.Errorf("invalid pull policy %v", cpEnv.DockerPolicy) } cpEnv.DockerPolicy = env.PullPolicy(args.pullPolicyControlPlane) if !cpEnv.DockerPolicy.Valid() { return errors.Errorf("invalid pull policy for control plane %v", cpEnv.DockerPolicy) } cpEnv.ApateletRunType = env.RunType(args.apateletRunType) if len(args.kubeConfigFileLocation) != 0 { bytes, err := ioutil.ReadFile(filepath.Clean(args.kubeConfigFileLocation)) if err != nil { return errors.Wrapf(err, "failed to read kubeconfig from file at %v", args.kubeConfigFileLocation) } cpEnv.KubeConfig = string(bytes) } err := container.SpawnControlPlaneContainer(ctx, pp, cpEnv) if err != nil { return errors.Wrap(err, "couldn't spawn control plane container") } color.Green("DONE\n") fmt.Print("Waiting for control plane to be up ") // Polling control plane until up statusClient, _ := controlplane.GetStatusClient(service.NewConnectionInfo(cpEnv.ListenAddress, cpEnv.ListenPort)) err = statusClient.WaitForControlPlane(ctx, time.Duration(args.controlPlaneTimeout)*time.Second) if err != nil { return errors.Wrap(err, "waiting for control plane on the client failed") } color.Green("DONE\n") fmt.Printf("Apate control plane created: %v\n", cpEnv) return statusClient.Conn.Close() } func runScenario(ctx context.Context, args *commandLineArgs) error { // The connectionInfo that will be used to connect to the control plane info := &service.ConnectionInfo{ Address: args.controlPlaneAddress, Port: args.controlPlanePort, } // Initial call: load the scenario scenarioClient, err := controlplane.GetScenarioClient(info) if err != nil { return errors.Wrap(err, "failed to get scenario client") } // Next: poll amount of healthy nodes trigger := make(chan struct{}) go func() { _, err = bufio.NewReader(os.Stdin).ReadBytes('\n') if err != nil { panicf(err) } trigger <- struct{}{} }() statusClient, err := controlplane.GetStatusClient(info) if err != nil { return errors.Wrap(err, "getting status client for runScenario failed") } err = statusClient.WaitForTrigger(ctx, trigger, func(healthy int) { fmt.Printf("\rGot %d healthy apatelets - Press enter to start scenario...", healthy) }) if err != nil { return errors.Wrap(err, "waiting for healthy Apatelets failed") } fmt.Printf("Starting scenario ") //Finally: actually start the scenario if _, err = scenarioClient.Client.StartScenario(ctx, &cpApi.StartScenario{DisableWatchers: args.scenarioDisableWatchers}); err != nil { return errors.Wrap(err, "couldn't start scenario") } err = scenarioClient.Conn.Close() if err != nil { return errors.Wrap(err, "couldn't close connection to scenario client") } color.Green("DONE\n") return statusClient.Conn.Close() }
{
	_, _ = color.New(color.FgRed).Printf("FAILED\nERROR: ")
	fmt.Printf("%+v\n", err)
}
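A hedged sketch of how `StartCmd` might be wired into a binary; the `main` package and the import path for this `run` package are assumptions, not taken from the repository:

package main

import (
	"os"

	// Assumed import path for the package defined above.
	"github.com/atlarge-research/apate/cmd/apate/run"
)

func main() {
	// StartCmd hands os.Args to urfave/cli, which dispatches to the
	// run/create/kubeconfig subcommands defined above.
	run.StartCmd(os.Args)
}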
printer.go
package ppp

import (
	"bytes"
	"fmt"
	"math"
	"math/big"
	"reflect"
	"regexp"
	"strconv"
	"strings"
	"text/tabwriter"
	"time"
)

const (
	indentWidth = 2
)

var (
	// If the length of an array or slice is larger than this,
	// the buffer will be shortened to {...}.
	BufferFoldThreshold = 1024

	// PrintMapTypes, when set to true, causes map types to always be
	// appended to maps.
	PrintMapTypes = true
)

func (pp *PrettyPrinter) format(object interface{}) string {
	return newPrinter(object, &pp.currentScheme, pp.maxDepth, pp.coloringEnabled).String()
}

func
(object interface{}, currentScheme *ColorScheme, maxDepth int, coloringEnabled bool) *printer { buffer := bytes.NewBufferString("") tw := new(tabwriter.Writer) tw.Init(buffer, indentWidth, 0, 1, ' ', 0) return &printer{ Buffer: buffer, tw: tw, depth: 0, maxDepth: maxDepth, value: reflect.ValueOf(object), visited: map[uintptr]bool{}, currentScheme: currentScheme, coloringEnabled: coloringEnabled, } } type printer struct { *bytes.Buffer tw *tabwriter.Writer depth int maxDepth int value reflect.Value visited map[uintptr]bool currentScheme *ColorScheme coloringEnabled bool } func (p *printer) String() string { switch p.value.Kind() { case reflect.Bool: p.colorPrint(p.raw(), p.currentScheme.Bool) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Complex64, reflect.Complex128: p.colorPrint(p.raw(), p.currentScheme.Integer) case reflect.Float32, reflect.Float64: p.colorPrint(p.raw(), p.currentScheme.Float) case reflect.String: p.printString() case reflect.Map: p.printMap() case reflect.Struct: p.printStruct() case reflect.Array, reflect.Slice: p.printSlice() case reflect.Chan: p.printf("(%s)(%s)", p.typeString(), p.pointerAddr()) case reflect.Interface: p.printInterface() case reflect.Ptr: p.printPtr() case reflect.Func: p.printf("%s {...}", p.typeString()) case reflect.UnsafePointer: p.printf("%s(%s)", p.typeString(), p.pointerAddr()) case reflect.Invalid: p.print(p.nil()) default: p.print(p.raw()) } p.tw.Flush() return p.Buffer.String() } func (p *printer) print(text string) { fmt.Fprint(p.tw, text) } func (p *printer) printf(format string, args ...interface{}) { text := fmt.Sprintf(format, args...) p.print(text) } func (p *printer) println(text string) { p.print(text + "\n") } func (p *printer) indentPrint(text string) { p.print(p.indent() + text) } func (p *printer) indentPrintf(format string, args ...interface{}) { text := fmt.Sprintf(format, args...) 
p.indentPrint(text) } func (p *printer) colorPrint(text string, color uint16) { p.print(p.colorize(text, color)) } func (p *printer) printString() { quoted := strconv.Quote(p.value.String()) quoted = quoted[1 : len(quoted)-1] p.colorPrint(`"`, p.currentScheme.StringQuotation) for len(quoted) > 0 { pos := strings.IndexByte(quoted, '\\') if pos == -1 { p.colorPrint(quoted, p.currentScheme.String) break } if pos != 0 { p.colorPrint(quoted[0:pos], p.currentScheme.String) } n := 1 switch quoted[pos+1] { case 'x': // "\x00" n = 3 case 'u': // "\u0000" n = 5 case 'U': // "\U00000000" n = 9 case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': // "\000" n = 3 } p.colorPrint(quoted[pos:pos+n+1], p.currentScheme.EscapedChar) quoted = quoted[pos+n+1:] } p.colorPrint(`"`, p.currentScheme.StringQuotation) } func (p *printer) printMap() { if p.value.Len() == 0 { p.printf("%s{}", p.typeString()) return } if p.visited[p.value.Pointer()] { p.printf("%s{...}", p.typeString()) return } p.visited[p.value.Pointer()] = true if PrintMapTypes { p.printf("%s{\n", p.typeString()) } else { p.println("{") } p.indented(func() { value := sortMap(p.value) for i := 0; i < value.Len(); i++ { p.indentPrintf("%s:\t%s,\n", p.format(value.keys[i]), p.format(value.values[i])) } }) p.indentPrint("}") } func (p *printer) printStruct() { if p.value.CanInterface() { if p.value.Type().String() == "time.Time" && p.value.Type().PkgPath() == "time" { p.printTime() return } else if p.value.Type().String() == "big.Int" { bigInt := p.value.Interface().(big.Int) p.print(p.colorize(bigInt.String(), p.currentScheme.Integer)) return } else if p.value.Type().String() == "big.Float" { bigFloat := p.value.Interface().(big.Float) p.print(p.colorize(bigFloat.String(), p.currentScheme.Float)) return } } if p.value.NumField() == 0 { p.print(p.typeString() + "{}") return } p.println(p.typeString() + "{") p.indented(func() { for i := 0; i < p.value.NumField(); i++ { field := p.value.Type().Field(i) value := p.value.Field(i) var fieldName string if tag := field.Tag.Get("pp"); tag != "" { parts := strings.Split(tag, ",") if len(parts) == 2 && parts[1] == "omitempty" && valueIsZero(value) { // omit field continue } if parts[0] == "-" { // omit field continue } // fieldName could be empty here - ",omitempty" fieldName = parts[0] } if fieldName == "" { fieldName = field.Name } colorizedFieldName := p.colorize(fieldName, p.currentScheme.FieldName) p.indentPrintf("%s:\t%s,\n", colorizedFieldName, p.format(value)) } }) p.indentPrint("}") } func (p *printer) printTime() { tm := p.value.Interface().(time.Time) p.printf( "%s-%s-%s %s:%s:%s %s", p.colorize(strconv.Itoa(tm.Year()), p.currentScheme.Time), p.colorize(fmt.Sprintf("%02d", tm.Month()), p.currentScheme.Time), p.colorize(fmt.Sprintf("%02d", tm.Day()), p.currentScheme.Time), p.colorize(fmt.Sprintf("%02d", tm.Hour()), p.currentScheme.Time), p.colorize(fmt.Sprintf("%02d", tm.Minute()), p.currentScheme.Time), p.colorize(fmt.Sprintf("%02d", tm.Second()), p.currentScheme.Time), p.colorize(tm.Location().String(), p.currentScheme.Time), ) } func (p *printer) printSlice() { if p.value.Kind() == reflect.Slice && p.value.IsNil() { p.printf("%s(%s)", p.typeString(), p.nil()) return } if p.value.Len() == 0 { p.printf("%s{}", p.typeString()) return } if p.value.Kind() == reflect.Slice { if p.visited[p.value.Pointer()] { // Stop travarsing cyclic reference p.printf("%s{...}", p.typeString()) return } p.visited[p.value.Pointer()] = true } // Fold a large buffer if p.value.Len() > BufferFoldThreshold { 
p.printf("%s{...}", p.typeString()) return } p.println(p.typeString() + "{") p.indented(func() { groupsize := 0 switch p.value.Type().Elem().Kind() { case reflect.Uint8: groupsize = 16 case reflect.Uint16: groupsize = 8 case reflect.Uint32: groupsize = 8 case reflect.Uint64: groupsize = 4 } if groupsize > 0 { for i := 0; i < p.value.Len(); i++ { // indent for new group if i%groupsize == 0 { p.print(p.indent()) } // slice element p.printf("%s,", p.format(p.value.Index(i))) // space or newline if (i+1)%groupsize == 0 || i+1 == p.value.Len() { p.print("\n") } else { p.print(" ") } } } else { for i := 0; i < p.value.Len(); i++ { p.indentPrintf("%s,\n", p.format(p.value.Index(i))) } } }) p.indentPrint("}") } func (p *printer) printInterface() { e := p.value.Elem() if e.Kind() == reflect.Invalid { p.print(p.nil()) } else if e.IsValid() { p.print(p.format(e)) } else { p.printf("%s(%s)", p.typeString(), p.nil()) } } func (p *printer) printPtr() { if p.visited[p.value.Pointer()] { p.printf("&%s{...}", p.elemTypeString()) return } if p.value.Pointer() != 0 { p.visited[p.value.Pointer()] = true } if p.value.Elem().IsValid() { p.printf("&%s", p.format(p.value.Elem())) } else { p.printf("(%s)(%s)", p.typeString(), p.nil()) } } func (p *printer) pointerAddr() string { return p.colorize(fmt.Sprintf("%#v", p.value.Pointer()), p.currentScheme.PointerAdress) } func (p *printer) typeString() string { return p.colorizeType(p.value.Type().String()) } func (p *printer) elemTypeString() string { return p.colorizeType(p.value.Elem().Type().String()) } func (p *printer) colorizeType(t string) string { prefix := "" if p.matchRegexp(t, `^\[\].+$`) { prefix = "[]" t = t[2:] } if p.matchRegexp(t, `^\[\d+\].+$`) { num := regexp.MustCompile(`\d+`).FindString(t) prefix = fmt.Sprintf("[%s]", p.colorize(num, p.currentScheme.ObjectLength)) t = t[2+len(num):] } if p.matchRegexp(t, `^[^\.]+\.[^\.]+$`) { ts := strings.Split(t, ".") t = fmt.Sprintf("%s.%s", ts[0], p.colorize(ts[1], p.currentScheme.StructName)) } else { t = p.colorize(t, p.currentScheme.StructName) } return prefix + t } func (p *printer) matchRegexp(text, exp string) bool { return regexp.MustCompile(exp).MatchString(text) } func (p *printer) indented(proc func()) { p.depth++ if p.maxDepth == -1 || p.depth <= p.maxDepth { proc() } p.depth-- } func (p *printer) raw() string { // Some value causes panic when Interface() is called. 
switch p.value.Kind() { case reflect.Bool: return fmt.Sprintf("%#v", p.value.Bool()) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return fmt.Sprintf("%#v", p.value.Int()) case reflect.Uint, reflect.Uintptr: return fmt.Sprintf("%#v", p.value.Uint()) case reflect.Uint8: return fmt.Sprintf("0x%02x", p.value.Uint()) case reflect.Uint16: return fmt.Sprintf("0x%04x", p.value.Uint()) case reflect.Uint32: return fmt.Sprintf("0x%08x", p.value.Uint()) case reflect.Uint64: return fmt.Sprintf("0x%016x", p.value.Uint()) case reflect.Float32, reflect.Float64: return fmt.Sprintf("%f", p.value.Float()) case reflect.Complex64, reflect.Complex128: return fmt.Sprintf("%#v", p.value.Complex()) default: return fmt.Sprintf("%#v", p.value.Interface()) } } func (p *printer) nil() string { return p.colorize("nil", p.currentScheme.Nil) } func (p *printer) colorize(text string, color uint16) string { if ColoringEnabled && p.coloringEnabled { return colorizeText(text, color) } else { return text } } func (p *printer) format(object interface{}) string { pp := newPrinter(object, p.currentScheme, p.maxDepth, p.coloringEnabled) pp.depth = p.depth pp.visited = p.visited if value, ok := object.(reflect.Value); ok { pp.value = value } return pp.String() } func (p *printer) indent() string { return strings.Repeat("\t", p.depth) } // valueIsZero reports whether v is the zero value for its type. // It returns false if the argument is invalid. // This is a copy paste of reflect#IsZero from go1.15. It is not present before go1.13 (source: https://golang.org/doc/go1.13#library) // source: https://golang.org/src/reflect/value.go?s=34297:34325#L1090 // This will need to be updated for new types or the decision should be made to drop support for Go version pre go1.13 func valueIsZero(v reflect.Value) bool { switch v.Kind() { case reflect.Bool: return !v.Bool() case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return v.Int() == 0 case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: return v.Uint() == 0 case reflect.Float32, reflect.Float64: return math.Float64bits(v.Float()) == 0 case reflect.Complex64, reflect.Complex128: c := v.Complex() return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0 case reflect.Array: for i := 0; i < v.Len(); i++ { if !valueIsZero(v.Index(i)) { return false } } return true case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer: return v.IsNil() case reflect.String: return v.Len() == 0 case reflect.Struct: for i := 0; i < v.NumField(); i++ { if !valueIsZero(v.Field(i)) { return false } } return true default: // this is the only difference between stdlib reflect#IsZero and this function. We're not going to // panic on the default cause, even return false } }
newPrinter
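The `printStruct` method above honors a `pp` struct tag for renaming and omitting fields; a brief illustration with a hypothetical type (not from this package):

type user struct {
	Name  string `pp:"name"`            // rendered under the tag name "name"
	Token string `pp:"-"`               // always omitted from the output
	Email string `pp:"email,omitempty"` // omitted while the value is zero
	Age   int    // untagged: rendered under the Go field name
}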
widgets.ts
export const FNB_WIDGETS: any[] = [ WorkInProgressComponent ];
import {WorkInProgressComponent} from "./work-in-progress.component";
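A hypothetical consumer of `FNB_WIDGETS` in an Angular module; the module name is illustrative, not from the source:

import { NgModule } from "@angular/core";

import { FNB_WIDGETS } from "./widgets";

// Spreading the array keeps the module in sync as widgets are added.
@NgModule({
    declarations: [...FNB_WIDGETS],
    exports: [...FNB_WIDGETS]
})
export class FnbWidgetsModule {}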
model.py
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. """ The models defined in this file represent the resource JSON description format and provide a layer of abstraction from the raw JSON. The advantages of this are: * Pythonic interface (e.g. ``action.request.operation``) * Consumers need not change for minor JSON changes (e.g. renamed field) These models are used both by the resource factory to generate resource classes as well as by the documentation generator. """ import logging from botocore import xform_name logger = logging.getLogger(__name__) class Identifier(object): """ A resource identifier, given by its name. :type name: string :param name: The name of the identifier """ def __init__(self, name, member_name=None): #: (``string``) The name of the identifier self.name = name self.member_name = member_name class Action(object): """ A service operation action. :type name: string :param name: The name of the action :type definition: dict :param definition: The JSON definition :type resource_defs: dict :param resource_defs: All resources defined in the service """ def __init__(self, name, definition, resource_defs): self._definition = definition #: (``string``) The name of the action self.name = name #: (:py:class:`Request`) This action's request or ``None`` self.request = None if 'request' in definition: self.request = Request(definition.get('request', {})) #: (:py:class:`ResponseResource`) This action's resource or ``None`` self.resource = None if 'resource' in definition: self.resource = ResponseResource(definition.get('resource', {}), resource_defs) #: (``string``) The JMESPath search path or ``None`` self.path = definition.get('path') class DefinitionWithParams(object): """ An item which has parameters exposed via the ``params`` property. A request has an operation and parameters, while a waiter has a name, a low-level waiter name and parameters. :type definition: dict :param definition: The JSON definition """ def __init__(self, definition): self._definition = definition @property def params(self): """ Get a list of auto-filled parameters for this request. :type: list(:py:class:`Parameter`) """ params = [] for item in self._definition.get('params', []): params.append(Parameter(**item)) return params class Parameter(object): """ An auto-filled parameter which has a source and target. For example, the ``QueueUrl`` may be auto-filled from a resource's ``url`` identifier when making calls to ``queue.receive_messages``. :type target: string :param target: The destination parameter name, e.g. ``QueueUrl`` :type source_type: string :param source_type: Where the source is defined. :type source: string :param source: The source name, e.g. 
``Url`` """ def __init__(self, target, source, name=None, path=None, value=None, **kwargs): #: (``string``) The destination parameter name self.target = target #: (``string``) Where the source is defined self.source = source #: (``string``) The name of the source, if given self.name = name #: (``string``) The JMESPath query of the source self.path = path #: (``string|int|float|bool``) The source constant value self.value = value # Complain if we encounter any unknown values. if kwargs: logger.warning('Unknown parameter options found: %s', kwargs) class Request(DefinitionWithParams): """ A service operation action request. :type definition: dict :param definition: The JSON definition """ def __init__(self, definition): super(Request, self).__init__(definition) #: (``string``) The name of the low-level service operation self.operation = definition.get('operation') class Waiter(DefinitionWithParams): """ An event waiter specification. :type name: string :param name: Name of the waiter :type definition: dict :param definition: The JSON definition """ PREFIX = 'WaitUntil' def __init__(self, name, definition): super(Waiter, self).__init__(definition) #: (``string``) The name of this waiter self.name = name #: (``string``) The name of the underlying event waiter self.waiter_name = definition.get('waiterName') class ResponseResource(object): """ A resource response to create after performing an action. :type definition: dict :param definition: The JSON definition :type resource_defs: dict :param resource_defs: All resources defined in the service """ def __init__(self, definition, resource_defs): self._definition = definition self._resource_defs = resource_defs #: (``string``) The name of the response resource type self.type = definition.get('type') #: (``string``) The JMESPath search query or ``None`` self.path = definition.get('path') @property def identifiers(self): """ A list of resource identifiers. :type: list(:py:class:`Identifier`) """ identifiers = [] for item in self._definition.get('identifiers', []): identifiers.append( Parameter(**item)) return identifiers @property def model(self): """ Get the resource model for the response resource. :type: :py:class:`ResourceModel` """ return ResourceModel(self.type, self._resource_defs[self.type], self._resource_defs) class Collection(Action): """ A group of resources. See :py:class:`Action`. :type name: string :param name: The name of the collection :type definition: dict :param definition: The JSON definition :type resource_defs: dict :param resource_defs: All resources defined in the service """ @property def batch_actions(self): """ Get a list of batch actions supported by the resource type contained in this action. This is a shortcut for accessing the same information through the resource model. :rtype: list(:py:class:`Action`) """ return self.resource.model.batch_actions class ResourceModel(object): """ A model representing a resource, defined via a JSON description format. A resource has identifiers, attributes, actions, sub-resources, references and collections. For more information on resources, see :ref:`guide_resources`. :type name: string :param name: The name of this resource, e.g. 
``sqs`` or ``Queue`` :type definition: dict :param definition: The JSON definition :type resource_defs: dict :param resource_defs: All resources defined in the service """ def __init__(self, name, definition, resource_defs): self._definition = definition self._resource_defs = resource_defs self._renamed = {} #: (``string``) The name of this resource self.name = name #: (``string``) The service shape name for this resource or ``None`` self.shape = definition.get('shape') def load_rename_map(self, shape=None): """ Load a name translation map given a shape. This will set up renamed values for any collisions, e.g. if the shape, an action, and a subresource all are all named ``foo`` then the resource will have an action ``foo``, a subresource named ``Foo`` and a property named ``foo_attribute``. This is the order of precedence, from most important to least important: * Load action (resource.load) * Identifiers * Actions * Subresources * References * Collections * Waiters * Attributes (shape members) Batch actions are only exposed on collections, so do not get modified here. Subresources use upper camel casing, so are unlikely to collide with anything but other subresources. Creates a structure like this:: renames = { ('action', 'id'): 'id_action', ('collection', 'id'): 'id_collection', ('attribute', 'id'): 'id_attribute' } # Get the final name for an action named 'id' name = renames.get(('action', 'id'), 'id') :type shape: botocore.model.Shape :param shape: The underlying shape for this resource. """ # Meta is a reserved name for resources names = set(['meta']) self._renamed = {} if self._definition.get('load'): names.add('load') for item in self._definition.get('identifiers', []): self._load_name_with_category(names, item['name'], 'identifier') for name in self._definition.get('actions', {}): self._load_name_with_category(names, name, 'action') for name, ref in self._get_has_definition().items(): # Subresources require no tmp members, just typically # identifiers and user input. data_required = False for identifier in ref['resource']['identifiers']: if identifier['source'] == 'tmp': data_required = True break if not data_required: self._load_name_with_category(names, name, 'subresource', snake_case=False) else: self._load_name_with_category(names, name, 'reference') for name in self._definition.get('hasMany', {}): self._load_name_with_category(names, name, 'collection') for name in self._definition.get('waiters', {}): self._load_name_with_category(names, Waiter.PREFIX + name, 'waiter') if shape is not None: for name in shape.members.keys(): self._load_name_with_category(names, name, 'attribute') def _load_name_with_category(self, names, name, category, snake_case=True): """ Load a name with a given category, possibly renaming it if that name is already in use. The name will be stored in ``names`` and possibly be set up in ``self._renamed``. :type names: set :param names: Existing names (Python attributes, properties, or methods) on the resource. :type name: string :param name: The original name of the value. :type category: string :param category: The value type, such as 'identifier' or 'action' :type snake_case: bool :param snake_case: True (default) if the name should be snake cased. """ if snake_case: name = xform_name(name) if name in names: logger.debug('Renaming %s %s %s' % (self.name, category, name)) self._renamed[(category, name)] = name + '_' + category name += '_' + category if name in names: # This isn't good, let's raise instead of trying to keep # renaming this value. 
raise ValueError('Problem renaming {0} {1} to {2}!'.format( self.name, category, name)) names.add(name) def _get_name(self, category, name, snake_case=True): """ Get a possibly renamed value given a category and name. This uses the rename map set up in ``load_rename_map``, so that method must be called once first. :type category: string :param category: The value type, such as 'identifier' or 'action' :type name: string :param name: The original name of the value :type snake_case: bool :param snake_case: True (default) if the name should be snake cased. :rtype: string :return: Either the renamed value if it is set, otherwise the original name. """ if snake_case: name = xform_name(name) return self._renamed.get((category, name), name) def get_attributes(self, shape): """ Get a dictionary of attribute names to original name and shape models that represent the attributes of this resource. Looks like the following: { 'some_name': ('SomeName', <Shape...>) } :type shape: botocore.model.Shape :param shape: The underlying shape for this resource.
""" attributes = {} identifier_names = [i.name for i in self.identifiers] for name, member in shape.members.items(): snake_cased = xform_name(name) if snake_cased in identifier_names: # Skip identifiers, these are set through other means continue snake_cased = self._get_name('attribute', snake_cased, snake_case=False) attributes[snake_cased] = (name, member) return attributes @property def identifiers(self): """ Get a list of resource identifiers. :type: list(:py:class:`Identifier`) """ identifiers = [] for item in self._definition.get('identifiers', []): name = self._get_name('identifier', item['name']) member_name = item.get('memberName', None) if member_name: member_name = self._get_name('attribute', member_name) identifiers.append(Identifier(name, member_name)) return identifiers @property def load(self): """ Get the load action for this resource, if it is defined. :type: :py:class:`Action` or ``None`` """ action = self._definition.get('load') if action is not None: action = Action('load', action, self._resource_defs) return action @property def actions(self): """ Get a list of actions for this resource. :type: list(:py:class:`Action`) """ actions = [] for name, item in self._definition.get('actions', {}).items(): name = self._get_name('action', name) actions.append(Action(name, item, self._resource_defs)) return actions @property def batch_actions(self): """ Get a list of batch actions for this resource. :type: list(:py:class:`Action`) """ actions = [] for name, item in self._definition.get('batchActions', {}).items(): name = self._get_name('batch_action', name) actions.append(Action(name, item, self._resource_defs)) return actions def _get_has_definition(self): """ Get a ``has`` relationship definition from a model, where the service resource model is treated special in that it contains a relationship to every resource defined for the service. This allows things like ``s3.Object('bucket-name', 'key')`` to work even though the JSON doesn't define it explicitly. :rtype: dict :return: Mapping of names to subresource and reference definitions. """ if self.name not in self._resource_defs: # This is the service resource, so let us expose all of # the defined resources as subresources. definition = {} for name, resource_def in self._resource_defs.items(): # It's possible for the service to have renamed a # resource or to have defined multiple names that # point to the same resource type, so we need to # take that into account. found = False has_items = self._definition.get('has', {}).items() for has_name, has_def in has_items: if has_def.get('resource', {}).get('type') == name: definition[has_name] = has_def found = True if not found: # Create a relationship definition and attach it # to the model, such that all identifiers must be # supplied by the user. It will look something like: # # { # 'resource': { # 'type': 'ResourceName', # 'identifiers': [ # {'target': 'Name1', 'source': 'input'}, # {'target': 'Name2', 'source': 'input'}, # ... # ] # } # } # fake_has = { 'resource': { 'type': name, 'identifiers': [] } } for identifier in resource_def.get('identifiers', []): fake_has['resource']['identifiers'].append({ 'target': identifier['name'], 'source': 'input' }) definition[name] = fake_has else: definition = self._definition.get('has', {}) return definition def _get_related_resources(self, subresources): """ Get a list of sub-resources or references. :type subresources: bool :param subresources: ``True`` to get sub-resources, ``False`` to get references. 
:rtype: list(:py:class:`ResponseResource`) """ resources = [] for name, definition in self._get_has_definition().items(): if subresources: name = self._get_name('subresource', name, snake_case=False) else: name = self._get_name('reference', name) action = Action(name, definition, self._resource_defs) data_required = False for identifier in action.resource.identifiers: if identifier.source == 'tmp': data_required = True break if subresources and not data_required: resources.append(action) elif not subresources and data_required: resources.append(action) return resources @property def subresources(self): """ Get a list of sub-resources. :type: list(:py:class:`ResponseResource`) """ return self._get_related_resources(True) @property def references(self): """ Get a list of reference resources. :type: list(:py:class:`ResponseResource`) """ return self._get_related_resources(False) @property def collections(self): """ Get a list of collections for this resource. :type: list(:py:class:`Collection`) """ collections = [] for name, item in self._definition.get('hasMany', {}).items(): name = self._get_name('collection', name) collections.append(Collection(name, item, self._resource_defs)) return collections @property def waiters(self): """ Get a list of waiters for this resource. :type: list(:py:class:`Waiter`) """ waiters = [] for name, item in self._definition.get('waiters', {}).items(): name = self._get_name('waiter', Waiter.PREFIX + name) waiters.append(Waiter(name, item)) return waiters
:rtype: dict
:return: Mapping of resource attributes.
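A minimal sketch of driving `ResourceModel` by hand, assuming a tiny hand-written definition in the JSON format described above (the resource and operation names are illustrative):

definition = {
    "shape": "Queue",
    "identifiers": [{"name": "Url"}],
    "actions": {"Delete": {"request": {"operation": "DeleteQueue"}}},
}

model = ResourceModel("Queue", definition, resource_defs={})
model.load_rename_map()  # must run before any renamed-name lookups

print([i.name for i in model.identifiers])  # ['url']
print([a.name for a in model.actions])      # ['delete']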
installations.py
# MIT License
#
# Copyright (c) 2019 Red Hat, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

from http import HTTPStatus
from logging import getLogger

try:
    from flask_restx import Namespace, Resource
except ModuleNotFoundError:
    from flask_restplus import Namespace, Resource

from packit_service.service.events import Event
from packit_service.service.models import Installation

logger = getLogger("packit_service")

ns = Namespace("installations", description="Github App installations")


@ns.route("")
class
(Resource):
    @ns.response(HTTPStatus.OK, "OK, installations list follows")
    def get(self):
        """List all Github App installations"""
        return [
            Event.ts2str(i["event_data"])
            for i in Installation.db().get_all().values()
        ]


@ns.route("/<int:id>")
@ns.param("id", "Installation identifier")
class InstallationItem(Resource):
    @ns.response(HTTPStatus.OK, "OK, installation details follow")
    @ns.response(HTTPStatus.NO_CONTENT, "identifier not in whitelist")
    def get(self, id):
        """Details of a specific installation"""
        installation = Installation.db().get(id)
        no_content = ("", HTTPStatus.NO_CONTENT)
        return installation["event_data"] if installation else no_content
InstallationsList
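A hedged wiring sketch for the namespace above; the Flask app and URL prefix are illustrative, not from the service code:

from flask import Flask
from flask_restx import Api

app = Flask(__name__)
api = Api(app)
api.add_namespace(ns, path="/installations")

# GET /installations     -> list of installation events
# GET /installations/42  -> details for installation id 42 (204 if unknown)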
__init__.py
import contextlib from contextlib import contextmanager import inspect import os import sys from typing import List import pytest import ddtrace from ddtrace import Span from ddtrace import Tracer from ddtrace.compat import httplib from ddtrace.compat import parse from ddtrace.compat import to_unicode from ddtrace.constants import SPAN_MEASURED_KEY from ddtrace.encoding import JSONEncoder from ddtrace.ext import http from ddtrace.internal._encoding import MsgpackEncoder from ddtrace.internal.dogstatsd import get_dogstatsd_client from ddtrace.internal.writer import AgentWriter from ddtrace.vendor import wrapt from tests.subprocesstest import SubprocessTestCase NO_CHILDREN = object() def assert_is_measured(span): """Assert that the span has the proper _dd.measured tag set""" assert SPAN_MEASURED_KEY in span.metrics assert SPAN_MEASURED_KEY not in span.meta assert span.get_metric(SPAN_MEASURED_KEY) == 1 def assert_is_not_measured(span): """Assert that the span does not set _dd.measured""" assert SPAN_MEASURED_KEY not in span.meta if SPAN_MEASURED_KEY in span.metrics: assert span.get_metric(SPAN_MEASURED_KEY) == 0 else: assert SPAN_MEASURED_KEY not in span.metrics def assert_span_http_status_code(span, code): """Assert on the span's 'http.status_code' tag""" tag = span.get_tag(http.STATUS_CODE) code = str(code) assert tag == code, "%r != %r" % (tag, code) @contextlib.contextmanager def override_env(env): """ Temporarily override ``os.environ`` with provided values:: >>> with self.override_env(dict(DATADOG_TRACE_DEBUG=True)): # Your test """ # Copy the full original environment original = dict(os.environ) # Update based on the passed in arguments os.environ.update(env) try: yield finally: # Full clear the environment out and reset back to the original os.environ.clear() os.environ.update(original) @contextlib.contextmanager def override_global_config(values): """ Temporarily override an global configuration:: >>> with self.override_global_config(dict(name=value,...)): # Your test """ # List of global variables we allow overriding # DEV: We do not do `ddtrace.config.keys()` because we have all of our integrations global_config_keys = [ "analytics_enabled", "report_hostname", "health_metrics_enabled", "env", "version", "service", ] # Grab the current values of all keys originals = dict((key, getattr(ddtrace.config, key)) for key in global_config_keys) # Override from the passed in keys for key, value in values.items(): if key in global_config_keys: setattr(ddtrace.config, key, value) try: yield finally: # Reset all to their original values for key, value in originals.items(): setattr(ddtrace.config, key, value) @contextlib.contextmanager def override_config(integration, values): """ Temporarily override an integration configuration value:: >>> with self.override_config('flask', dict(service_name='test-service')): # Your test """ options = getattr(ddtrace.config, integration) original = dict((key, options.get(key)) for key in values.keys()) options.update(values) try: yield finally: options.update(original) @contextlib.contextmanager def override_http_config(integration, values): """ Temporarily override an integration configuration for HTTP value:: >>> with self.override_http_config('flask', dict(trace_query_string=True)): # Your test """ options = getattr(ddtrace.config, integration).http original = {} for key, value in values.items(): original[key] = getattr(options, key) setattr(options, key, value) try: yield finally: for key, value in original.items(): setattr(options, key, value) 
@contextlib.contextmanager def override_sys_modules(modules): """ Temporarily override ``sys.modules`` with provided dictionary of modules:: >>> mock_module = mock.MagicMock() >>> mock_module.fn.side_effect = lambda: 'test' >>> with self.override_sys_modules(dict(A=mock_module)): # Your test """ original = dict(sys.modules) sys.modules.update(modules) try: yield finally: sys.modules.clear() sys.modules.update(original) class BaseTestCase(SubprocessTestCase): """ BaseTestCase extends ``unittest.TestCase`` to provide some useful helpers/assertions Example:: from tests import BaseTestCase class MyTestCase(BaseTestCase): def test_case(self): with self.override_config('flask', dict(distributed_tracing_enabled=True): pass """ override_env = staticmethod(override_env) override_global_config = staticmethod(override_global_config) override_config = staticmethod(override_config) override_http_config = staticmethod(override_http_config) override_sys_modules = staticmethod(override_sys_modules) assert_is_measured = staticmethod(assert_is_measured) assert_is_not_measured = staticmethod(assert_is_not_measured) class TestSpanContainer(object): """ Helper class for a container of Spans. Subclasses of this class must implement a `get_spans` method:: def get_spans(self): return [] This class provides methods and assertions over a list of spans:: class TestCases(BaseTracerTestCase): def test_spans(self): # TODO: Create spans self.assert_has_spans() self.assert_span_count(3) self.assert_structure( ... ) # Grab only the `requests.request` spans spans = self.filter_spans(name='requests.request') """ def _ensure_test_spans(self, spans): """ internal helper to ensure the list of spans are all :class:`tests.utils.span.TestSpan` :param spans: List of :class:`ddtrace.span.Span` or :class:`tests.utils.span.TestSpan` :type spans: list :returns: A list og :class:`tests.utils.span.TestSpan` :rtype: list """ return [span if isinstance(span, TestSpan) else TestSpan(span) for span in spans] @property def spans(self): return self._ensure_test_spans(self.get_spans()) def get_spans(self): """subclass required property""" raise NotImplementedError def _build_tree(self, root): """helper to build a tree structure for the provided root span""" children = [] for span in self.spans: if span.parent_id == root.span_id: children.append(self._build_tree(span)) return TestSpanNode(root, children) def get_root_span(self): """ Helper to get the root span from the list of spans in this container :returns: The root span if one was found, None if not, and AssertionError if multiple roots were found :rtype: :class:`tests.utils.span.TestSpanNode`, None :raises: AssertionError """ root = None for span in self.spans: if span.parent_id is None: if root is not None: raise AssertionError("Multiple root spans found {0!r} {1!r}".format(root, span)) root = span assert root, "No root span found in {0!r}".format(self.spans) return self._build_tree(root) def get_root_spans(self): """ Helper to get all root spans from the list of spans in this container :returns: The root spans if any were found, None if not :rtype: list of :class:`tests.utils.span.TestSpanNode`, None """ roots = [] for span in self.spans: if span.parent_id is None: roots.append(self._build_tree(span)) return sorted(roots, key=lambda s: s.start) def assert_trace_count(self, count): """Assert the number of unique trace ids this container has""" trace_count = len(self.get_root_spans()) assert trace_count == count, "Trace count {0} != {1}".format(trace_count, count) def 
assert_span_count(self, count): """Assert this container has the expected number of spans""" assert len(self.spans) == count, "Span count {0} != {1}".format(len(self.spans), count) def assert_has_spans(self): """Assert this container has spans""" assert len(self.spans), "No spans found" def assert_has_no_spans(self): """Assert this container does not have any spans""" assert len(self.spans) == 0, "Span count {0}".format(len(self.spans)) def filter_spans(self, *args, **kwargs): """ Helper to filter current spans by provided parameters. This function will yield all spans whose `TestSpan.matches` function return `True`. :param args: Positional arguments to pass to :meth:`tests.utils.span.TestSpan.matches` :type args: list :param kwargs: Keyword arguments to pass to :meth:`tests.utils.span.TestSpan.matches` :type kwargs: dict :returns: generator for the matched :class:`tests.utils.span.TestSpan` :rtype: generator """ for span in self.spans: # ensure we have a TestSpan if not isinstance(span, TestSpan): span = TestSpan(span) if span.matches(*args, **kwargs): yield span def find_span(self, *args, **kwargs): """ Find a single span matches the provided filter parameters. This function will find the first span whose `TestSpan.matches` function return `True`. :param args: Positional arguments to pass to :meth:`tests.utils.span.TestSpan.matches` :type args: list :param kwargs: Keyword arguments to pass to :meth:`tests.utils.span.TestSpan.matches` :type kwargs: dict :returns: The first matching span :rtype: :class:`tests.TestSpan` """ span = next(self.filter_spans(*args, **kwargs), None) assert span is not None, "No span found for filter {0!r} {1!r}, have {2} spans".format( args, kwargs, len(self.spans) ) return span class TracerTestCase(TestSpanContainer, BaseTestCase): """ BaseTracerTestCase is a base test case for when you need access to a dummy tracer and span assertions """ def setUp(self): """Before each test case, setup a dummy tracer to use""" self.tracer = DummyTracer() super(TracerTestCase, self).setUp() def tearDown(self): """After each test case, reset and remove the dummy tracer""" super(TracerTestCase, self).tearDown() self.reset() delattr(self, "tracer") def get_spans(self): """Required subclass method for TestSpanContainer""" return self.tracer.writer.spans def pop_spans(self): # type: () -> List[Span] return self.tracer.pop() def pop_traces(self): # type: () -> List[List[Span]] return self.tracer.pop_traces() def reset(self): """Helper to reset the existing list of spans created""" self.tracer.writer.pop() def trace(self, *args, **kwargs): """Wrapper for self.tracer.trace that returns a TestSpan""" return TestSpan(self.tracer.trace(*args, **kwargs)) def start_span(self, *args, **kwargs): """Helper for self.tracer.start_span that returns a TestSpan""" return TestSpan(self.tracer.start_span(*args, **kwargs)) def assert_structure(self, root, children=NO_CHILDREN): """Helper to call TestSpanNode.assert_structure on the current root span""" root_span = self.get_root_span() root_span.assert_structure(root, children) @contextlib.contextmanager def override_global_tracer(self, tracer=None): original = ddtrace.tracer tracer = tracer or self.tracer setattr(ddtrace, "tracer", tracer) try: yield finally: setattr(ddtrace, "tracer", original) class DummyWriter(AgentWriter): """DummyWriter is a small fake writer used for tests. 
not thread-safe.""" def __init__(self, *args, **kwargs): # original call super(DummyWriter, self).__init__(*args, **kwargs) # dummy components self.spans = [] self.traces = [] self.json_encoder = JSONEncoder() self.msgpack_encoder = MsgpackEncoder() def write(self, spans=None): if spans: # the traces encoding expect a list of traces so we # put spans in a list like we do in the real execution path # with both encoders trace = [spans] self.json_encoder.encode_traces(trace) self.msgpack_encoder.encode_traces(trace) self.spans += spans self.traces += trace def pop(self): # type: () -> List[Span] s = self.spans self.spans = [] return s def pop_traces(self): # type: () -> List[List[Span]] traces = self.traces self.traces = [] return traces class DummyTracer(Tracer): """ DummyTracer is a tracer which uses the DummyWriter by default """ def __init__(self): super(DummyTracer, self).__init__() self._update_writer() def _update_writer(self): # Track which writer the DummyWriter was created with, used # some tests if not isinstance(self.writer, DummyWriter): self.original_writer = self.writer if isinstance(self.writer, AgentWriter): self.writer = DummyWriter( agent_url=self.writer.agent_url, priority_sampler=self.writer._priority_sampler, dogstatsd=get_dogstatsd_client(self._dogstatsd_url), ) else: self.writer = DummyWriter( priority_sampler=self.writer._priority_sampler, ) def pop(self): # type: () -> List[Span] return self.writer.pop() def pop_traces(self): # type: () -> List[List[Span]] return self.writer.pop_traces() def configure(self, *args, **kwargs): super(DummyTracer, self).configure(*args, **kwargs) # `.configure()` may reset the writer self._update_writer() class TestSpan(Span): """ Test wrapper for a :class:`ddtrace.span.Span` that provides additional functions and assertions Example:: span = tracer.trace('my.span') span = TestSpan(span) if span.matches(name='my.span'): print('matches') # Raises an AssertionError span.assert_matches(name='not.my.span', meta={'system.pid': getpid()}) """ def __init__(self, span): """ Constructor for TestSpan :param span: The :class:`ddtrace.span.Span` to wrap :type span: :class:`ddtrace.span.Span` """ if isinstance(span, TestSpan): span = span._span # DEV: Use `object.__setattr__` to by-pass this class's `__setattr__` object.__setattr__(self, "_span", span) def
(self, key): """ First look for property on the base :class:`ddtrace.span.Span` otherwise return this object's attribute """ if hasattr(self._span, key): return getattr(self._span, key) return self.__getattribute__(key) def __setattr__(self, key, value): """Pass through all assignment to the base :class:`ddtrace.span.Span`""" return setattr(self._span, key, value) def __eq__(self, other): """ Custom equality code to ensure we are using the base :class:`ddtrace.span.Span.__eq__` :param other: The object to check equality with :type other: object :returns: True if equal, False otherwise :rtype: bool """ if isinstance(other, TestSpan): return other._span == self._span elif isinstance(other, Span): return other == self._span return other == self def matches(self, **kwargs): """ Helper function to check if this span's properties matches the expected. Example:: span = TestSpan(span) span.matches(name='my.span', resource='GET /') :param kwargs: Property/Value pairs to evaluate on this span :type kwargs: dict :returns: True if the arguments passed match, False otherwise :rtype: bool """ for name, value in kwargs.items(): # Special case for `meta` if name == "meta" and not self.meta_matches(value): return False # Ensure it has the property first if not hasattr(self, name): return False # Ensure the values match if getattr(self, name) != value: return False return True def meta_matches(self, meta, exact=False): """ Helper function to check if this span's meta matches the expected Example:: span = TestSpan(span) span.meta_matches({'system.pid': getpid()}) :param meta: Property/Value pairs to evaluate on this span :type meta: dict :param exact: Whether to do an exact match on the meta values or not, default: False :type exact: bool :returns: True if the arguments passed match, False otherwise :rtype: bool """ if exact: return self.meta == meta for key, value in meta.items(): if key not in self.meta: return False if self.meta[key] != value: return False return True def assert_matches(self, **kwargs): """ Assertion method to ensure this span's properties match as expected Example:: span = TestSpan(span) span.assert_matches(name='my.span') :param kwargs: Property/Value pairs to evaluate on this span :type kwargs: dict :raises: AssertionError """ for name, value in kwargs.items(): # Special case for `meta` if name == "meta": self.assert_meta(value) elif name == "metrics": self.assert_metrics(value) else: assert hasattr(self, name), "{0!r} does not have property {1!r}".format(self, name) assert getattr(self, name) == value, "{0!r} property {1}: {2!r} != {3!r}".format( self, name, getattr(self, name), value ) def assert_meta(self, meta, exact=False): """ Assertion method to ensure this span's meta match as expected Example:: span = TestSpan(span) span.assert_meta({'system.pid': getpid()}) :param meta: Property/Value pairs to evaluate on this span :type meta: dict :param exact: Whether to do an exact match on the meta values or not, default: False :type exact: bool :raises: AssertionError """ if exact: assert self.meta == meta else: for key, value in meta.items(): assert key in self.meta, "{0} meta does not have property {1!r}".format(self, key) assert self.meta[key] == value, "{0} meta property {1!r}: {2!r} != {3!r}".format( self, key, self.meta[key], value ) def assert_metrics(self, metrics, exact=False): """ Assertion method to ensure this span's metrics match as expected Example:: span = TestSpan(span) span.assert_metrics({'_dd1.sr.eausr': 1}) :param metrics: Property/Value pairs to evaluate on this span 
:type metrics: dict :param exact: Whether to do an exact match on the metrics values or not, default: False :type exact: bool :raises: AssertionError """ if exact: assert self.metrics == metrics else: for key, value in metrics.items(): assert key in self.metrics, "{0} metrics does not have property {1!r}".format(self, key) assert self.metrics[key] == value, "{0} metrics property {1!r}: {2!r} != {3!r}".format( self, key, self.metrics[key], value ) class TracerSpanContainer(TestSpanContainer): """ A class to wrap a :class:`tests.utils.tracer.DummyTracer` with a :class:`tests.utils.span.TestSpanContainer` to use in tests """ def __init__(self, tracer): self.tracer = tracer super(TracerSpanContainer, self).__init__() def get_spans(self): """ Overridden method to return all spans attached to this tracer :returns: List of spans attached to this tracer :rtype: list """ return self.tracer.writer.spans def pop(self): return self.tracer.pop() def pop_traces(self): return self.tracer.pop_traces() def reset(self): """Helper to reset the existing list of spans created""" self.tracer.pop() class TestSpanNode(TestSpan, TestSpanContainer): """ A :class:`tests.utils.span.TestSpan` which is used as part of a span tree. Each :class:`tests.utils.span.TestSpanNode` represents the current :class:`ddtrace.span.Span` along with any children who have that span as its parent. This class can be used to assert on the parent/child relationships between spans. Example:: class TestCase(BaseTestCase): def test_case(self): # TODO: Create spans self.assert_structure( ... ) tree = self.get_root_span() # Find the first child of the root span with the matching name request = tree.find_span(name='requests.request') # Assert the parent/child relationship of this `request` span request.assert_structure( ... ) """ def __init__(self, root, children=None): super(TestSpanNode, self).__init__(root) object.__setattr__(self, "_children", children or []) def get_spans(self): """required subclass property, returns this span's children""" return self._children def assert_structure(self, root, children=NO_CHILDREN): """ Assertion to assert on the structure of this node and its children. This assertion takes a dictionary of properties to assert for this node along with a list of assertions to make for its children. Example:: def test_case(self): # Assert the following structure # # One root_span, with two child_spans, one with a requests.request span # # | root_span | # | child_span | | child_span | # | requests.request | self.assert_structure( # Root span with two child_span spans dict(name='root_span'), ( # Child span with one child of its own ( dict(name='child_span'), # One requests.request span with no children ( dict(name='requests.request'), ), ), # Child span with no children dict(name='child_span'), ), ) :param root: Properties to assert for this root span, these are passed to :meth:`tests.utils.span.TestSpan.assert_matches` :type root: dict :param children: List of child assertions to make, if children is None then do not make any assertions about this node's children. Each list element must be a list with 2 items: the first is a ``dict`` of property assertions on that child, and the second is a ``list`` of child assertions to make.
:type children: list, None :raises: AssertionError """ self.assert_matches(**root) # Give them a way to ignore asserting on children if children is None: return elif children is NO_CHILDREN: children = () spans = self.spans self.assert_span_count(len(children)) for i, child in enumerate(children): if not isinstance(child, (list, tuple)): child = (child, NO_CHILDREN) root, _children = child spans[i].assert_matches(parent_id=self.span_id, trace_id=self.trace_id, _parent=self) spans[i].assert_structure(root, _children) def pprint(self): parts = [super(TestSpanNode, self).pprint()] for child in self._children: parts.append("-" * 20) parts.append(child.pprint()) return "\r\n".join(parts) def assert_dict_issuperset(a, b): assert set(a.items()).issuperset(set(b.items())), "{a} is not a superset of {b}".format(a=a, b=b) @contextmanager def override_global_tracer(tracer): """Helper function that overrides the global tracer available in the `ddtrace` package. This is required because in some `httplib` tests we can't easily get the PIN object attached to the `HTTPConnection` to replace the used tracer with a dummy tracer. """ original_tracer = ddtrace.tracer ddtrace.tracer = tracer try: yield finally: ddtrace.tracer = original_tracer class SnapshotFailed(Exception): pass def snapshot(ignores=None, include_tracer=False, variants=None, async_mode=True): """Performs a snapshot integration test with the testing agent. All traces sent to the agent will be recorded and compared to a snapshot created for the test case. :param ignores: A list of keys to ignore when comparing snapshots. To refer to keys in the meta or metrics maps use "meta.key" and "metrics.key" :param include_tracer: Whether to pass a fresh tracer, providing the agent connection information to use, to the test case. """ ignores = ignores or [] if include_tracer: tracer = Tracer() else: tracer = ddtrace.tracer @wrapt.decorator def wrapper(wrapped, instance, args, kwargs): if len(args) > 1: self = args[0] clsname = self.__class__.__name__ else: clsname = "" module = inspect.getmodule(wrapped) # Use the fully qualified function name as a unique test token to # identify the snapshot. token = "{}{}{}.{}".format(module.__name__, "." if clsname else "", clsname, wrapped.__name__) # Use the applicable variant to update the test token. Exactly one must apply; if none # apply, the test should have been marked as skipped. if variants: applicable_variant_ids = [k for (k, v) in variants.items() if v] assert len(applicable_variant_ids) == 1 variant_id = applicable_variant_ids[0] token = "{}_{}".format(token, variant_id) if variant_id else token parsed = parse.urlparse(tracer.writer.agent_url) conn = httplib.HTTPConnection(parsed.hostname, parsed.port) try: # clear the queue in case traces have been generated before the test case is # itself run try: tracer.writer.flush_queue() except Exception as e: pytest.fail("Could not flush the queue before test case: %s" % str(e), pytrace=True) if async_mode: # Patch the tracer writer to include the test token header for all requests. tracer.writer._headers["X-Datadog-Test-Token"] = token else: # Signal the start of this test case to the test agent. try: conn.request("GET", "/test/start?token=%s" % token) except Exception as e: pytest.fail("Could not connect to test agent: %s" % str(e), pytrace=False) else: r = conn.getresponse() if r.status != 200: # The test agent returns nice error messages we can forward to the user. raise SnapshotFailed(r.read()) # Run the test. try: if include_tracer: kwargs["tracer"] = tracer ret = wrapped(*args, **kwargs) # Force a flush so all traces are submitted.
tracer.writer.flush_queue() finally: if async_mode: del tracer.writer._headers["X-Datadog-Test-Token"] # Query for the results of the test. conn = httplib.HTTPConnection(parsed.hostname, parsed.port) conn.request("GET", "/test/snapshot?ignores=%s&token=%s" % (",".join(ignores), token)) r = conn.getresponse() if r.status != 200: raise SnapshotFailed(r.read()) return ret except SnapshotFailed as e: # Fail the test if a failure has occurred and print out the # message we got from the test agent. pytest.fail(to_unicode(e.args[0]), pytrace=False) except Exception as e: # Even though it's unlikely any traces have been sent, make the # final request to the test agent so that the test case is finished. conn = httplib.HTTPConnection(parsed.hostname, parsed.port) conn.request("GET", "/test/snapshot?ignores=%s&token=%s" % (",".join(ignores), token)) conn.getresponse() pytest.fail("Unexpected test failure during snapshot test: %s" % str(e), pytrace=True) finally: conn.close() return wrapper class AnyStr(object): def __eq__(self, other): return isinstance(other, str) class AnyInt(object): def __eq__(self, other): return isinstance(other, int) class AnyFloat(object): def __eq__(self, other): return isinstance(other, float)
__getattr__
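A minimal usage sketch of the span-assertion helpers above; the DummyTracer import path and the span names are illustrative assumptions, not part of this file:

from tests.utils.tracer import DummyTracer  # assumed test helper, see the class docstring above

tracer = DummyTracer()
with tracer.trace("web.request") as span:  # hypothetical span name
    span.set_tag("http.status_code", "200")

container = TracerSpanContainer(tracer)
root = container.get_root_span()  # returns a TestSpanNode for the single trace

# matches() returns a bool; the assert_* variants raise AssertionError instead.
assert root.matches(name="web.request")
root.assert_meta({"http.status_code": "200"})
root.assert_structure(dict(name="web.request"))  # also asserts the span has no children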
utils.py
# This file is part of the Pattern and Anomaly Detection Library (openclean_pattern). # # Copyright (C) 2021 New York University. # # openclean_pattern is released under the Revised BSD License. See file LICENSE for # full license details. """A collection of useful utility methods""" import re from abc import ABCMeta, abstractmethod import random import bisect from collections import Counter # -- Comparators -------------------------------------------------------------- class Comparator(metaclass=ABCMeta): """Compares different data items """ @abstractmethod def compare(self, a, b, meta=None): """Compares a with b and returns True if a and b are equal. The comparison can involve any extra meta information that the user wants to consider Parameters ---------- a: Any the datatype to compare b: Any the datatype to compare against meta: Any (Optional) any extra information used in the comparison Returns ------- bool """ raise NotImplementedError() class StringComparator(Comparator): """Class of useful string comparison methods """ @staticmethod def compare_strings(s1, s2, ambiguous_char='X'): """ Compares two strings character by character and replaces distinct characters with the ambiguous character. Then returns the new string along with an ambiguity ratio Parameters ---------- s1 : str string 1 s2 : str string 2 ambiguous_char: str the character that replaces distinct characters Returns ------- str, float """ smaller_size = min(len(s1), len(s2)) new_string = '' for i in range(smaller_size): if s1[i] == s2[i]: new_string += s1[i] else: new_string += ambiguous_char for j in range(abs(len(s1) - len(s2))): new_string += ambiguous_char ambiguity = new_string.count(ambiguous_char) / len(new_string) if len(new_string) > 0 else 0 return new_string, ambiguity @staticmethod def substring_finder(string1, string2): anslist = [] len1, len2 = len(string1), len(string2) for i in range(len1): match = "" for j in range(len2): if (i + j < len1 and string1[i + j] == string2[j]): match += string2[j] else: answer = match if answer != '' and len(answer) > 1: anslist.append(answer) match = "" if match != '': anslist.append(match) return anslist def has_numbers(inputString): return bool(re.search(r'\d', inputString)) # -- Samplers ----------------------------------------------------------------- class Sampler(metaclass=ABCMeta): """Class to sample an input iterable. This was necessary because pandas.sample sampling can be slow.""" def __init__(self, iterable, n=1): """initializes the Sampler class Parameters ---------- iterable: Iterable the iterable class object which has data to be sampled n: float the proportion or number of records to sample """ self.iterable = iterable self.n = n self.frac = 0 <= n <= 1 @abstractmethod def __call__(self, *args, **kwargs): """Method to sample the input iterable sequence """ raise NotImplementedError() def sample(self): """a convenience sample method """ return self.__call__() class WeightedRandomSampler(Sampler): """Implements weighted random sampling using the distribution provided as a collections.Counter object. Based on the work: https://eli.thegreenplace.net/2010/01/22/weighted-random-generation-in-python/ Note: if a Counter or dict of type {value:frequency} is passed in, there is no rowidx information tied to the sampled series and this can possibly require an extra lookup during anomaly detection """ def __init__(self, weights, n=1, random_state=None):
def next(self): """selects a new randomly sampled value from the input series based on their weight distribution and returns the respective index Returns ------- int """ rnd = random.random() * self.totals[-1] return bisect.bisect_right(self.totals, rnd) def __call__(self): """samples n (or n*total_inputs, if n is a fraction) times and returns the sampled frequencies as a counter Returns ------- sampled list of rows """ sample = Counter() n = int(self.totals[-1] * self.n) if self.frac else int(self.n) keys = list(self.iterable.keys()) random.seed(self.random_state) for _c in range(n): sample[keys[self.next()]] += 1 return WeightedRandomSampler.counter_to_list(sample) @staticmethod def counter_to_list(counter): """ method to create a series list from a counter object Parameters ---------- counter: collections.Counter the counter object to convert to a list Returns ------- list of values """ series = list() for k, v in counter.items(): for _ in range(v): series.append(k) return series class RandomSampler(Sampler): """Class to randomly sample an input iterable. This was necessary because pandas.sample samples a dataframe which can be slow. Note: if a Counter or dict of type {value:frequency} is passed in, there is no rowidx information tied to the sampled series and this can possibly require an extra lookup during anomaly detection """ def __init__(self, iterable, n=1, random_state=None): """initializes the RandomSampler class Parameters ---------- iterable: Iterable the iterable class object which has data to be sampled n: float the proportion or number of records to sample random_state: int (default: None) the seed value for the pseudo random number generator """ super(RandomSampler, self).__init__(iterable, n) self.random_state = random_state def __call__(self, *args, **kwargs): """Method to sample the input iterable sequence Returns ------- sampled list of rows """ random.seed(self.random_state) n = int(len(self.iterable) * self.n) if self.frac else int(self.n) return random.sample(self.iterable, n) class Distinct(Sampler): """Class to select only the distinct values from the input iterable""" def __init__(self, iterable): """initializes the Distinct class Parameters ---------- iterable: Iterable the iterable class object which has data to be sampled """ super(Distinct, self).__init__(iterable, 1) def __call__(self, *args, **kwargs): """Method to distinct-ify the input iterable sequence Returns ------- distinct list of rows """ return list(set(self.iterable)) # -- Helper methods ----------------------------------------------------------------- def list_contains_list(o, tree_types=list): """checks if a list contains more lists""" if isinstance(o, tree_types): for v in o: if isinstance(v, tree_types): return True elif not isinstance(o, tree_types): # ignore values that aren't lists themselves return True return False
"""initializes the WeightedRandomSampler class Parameters ---------- weights: collections.Counter the counter object in the format key:frequency n: float the proportion or number of records to sample random_state: int (default: None) the seed value for the pseudo random number generator """ super(WeightedRandomSampler, self).__init__(weights, n) self.random_state = random_state self.totals = [] # cumulative sum of the weights, used by next() for bisection running_total = 0 for w in weights.values(): running_total += w self.totals.append(running_total)
mod.rs
#[doc = r" Value read from the register"] pub struct R { bits: u32, } #[doc = r" Value to write to the register"] pub struct W { bits: u32, } impl super::FIFOPR { #[doc = r" Modifies the contents of the register"] #[inline] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W, { let bits = self.register.get(); let r = R { bits: bits }; let mut w = W { bits: bits }; f(&r, &mut w); self.register.set(w.bits); } #[doc = r" Reads the contents of the register"] #[inline] pub fn read(&self) -> R { R { bits: self.register.get(), } } #[doc = r" Writes to the register"] #[inline] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W, { let mut w = W::reset_value(); f(&mut w); self.register.set(w.bits); } #[doc = r" Writes the reset value to the register"] #[inline] pub fn reset(&self) { self.write(|w| w) } } #[doc = r" Value of the field"] pub struct RSVDPRR { bits: bool, } impl RSVDPRR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"]
#[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct SLOTNUMPRR { bits: u8, } impl SLOTNUMPRR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { self.bits } } #[doc = r" Value of the field"] pub struct COUNTR { bits: u8, } impl COUNTR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { self.bits } } #[doc = r" Value of the field"] pub struct DATAR { bits: u32, } impl DATAR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u32 { self.bits } } #[doc = r" Proxy"] pub struct _RSVDPRW<'a> { w: &'a mut W, } impl<'a> _RSVDPRW<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 31; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _SLOTNUMPRW<'a> { w: &'a mut W, } impl<'a> _SLOTNUMPRW<'a> { #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u8) -> &'a mut W { const MASK: u8 = 7; const OFFSET: u8 = 28; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _COUNTW<'a> { w: &'a mut W, } impl<'a> _COUNTW<'a> { #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u8) -> &'a mut W { const MASK: u8 = 255; const OFFSET: u8 = 20; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _DATAW<'a> { w: &'a mut W, } impl<'a> _DATAW<'a> { #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u32) -> &'a mut W { const MASK: u32 = 1048575; const OFFSET: u8 = 0; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } impl R { #[doc = r" Value of the register as raw bits"] #[inline] pub fn bits(&self) -> u32 { self.bits } #[doc = "Bit 31 - RESERVED."] #[inline] pub fn rsvdpr(&self) -> RSVDPRR { let bits = { const MASK: bool = true; const OFFSET: u8 = 31; ((self.bits >> OFFSET) & MASK as u32) != 0 }; RSVDPRR { bits } } #[doc = "Bits 28:30 - Slot number associated with this FIFO data."] #[inline] pub fn slotnumpr(&self) -> SLOTNUMPRR { let bits = { const MASK: u8 = 7; const OFFSET: u8 = 28; ((self.bits >> OFFSET) & MASK as u32) as u8 }; SLOTNUMPRR { bits } } #[doc = "Bits 20:27 - Number of valid entries in the ADC FIFO."] #[inline] pub fn count(&self) -> COUNTR { let bits = { const MASK: u8 = 255; const OFFSET: u8 = 20; ((self.bits >> OFFSET) & MASK as u32) as u8 }; COUNTR { bits } } #[doc = "Bits 0:19 - Oldest data in the FIFO."] #[inline] pub fn data(&self) -> DATAR { let bits = { const MASK: u32 = 1048575; const OFFSET: u8 = 0; ((self.bits >> OFFSET) & MASK as u32) as u32 }; DATAR { bits } } } impl W { #[doc = r" Reset value of the register"] #[inline] pub fn reset_value() -> W { W { bits: 0 } } #[doc = r" Writes raw bits to the register"] #[inline] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } #[doc = "Bit 31 - RESERVED."] #[inline] pub fn rsvdpr(&mut self) -> _RSVDPRW { _RSVDPRW { w: self } } #[doc = "Bits 28:30 - Slot number associated with this FIFO data."] #[inline] pub fn slotnumpr(&mut self) 
-> _SLOTNUMPRW { _SLOTNUMPRW { w: self } } #[doc = "Bits 20:27 - Number of valid entries in the ADC FIFO."] #[inline] pub fn count(&mut self) -> _COUNTW { _COUNTW { w: self } } #[doc = "Bits 0:19 - Oldest data in the FIFO."] #[inline] pub fn data(&mut self) -> _DATAW { _DATAW { w: self } } }
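All of the generated accessors above reduce to the same mask-and-shift pattern. A language-neutral sketch of that pattern (written in Python purely for illustration, using the COUNT field's mask 0xFF at offset 20 from this register):

def read_field(reg: int, offset: int, mask: int) -> int:
    # Mirrors the generated readers: shift the register value down, then mask.
    return (reg >> offset) & mask

def write_field(reg: int, offset: int, mask: int, value: int) -> int:
    # Mirrors the generated write proxies: clear the field bits, then OR in the value.
    reg &= ~(mask << offset)
    reg |= (value & mask) << offset
    return reg

reg = write_field(0, 20, 0xFF, 42)   # set COUNT (bits 20:27) to 42
assert read_field(reg, 20, 0xFF) == 42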
misc.py
#!/usr/bin/env python3 import functools import time class Chrono(): def __init__(self, msg=None): if msg: print(msg) self.t0 = time.time() self.t = self.t0 def lap(self, name=None): now = time.time() if name: print(name, end=': ') msg = '{:.2g} s (total: {:.2g} s)' msg = msg.format(now - self.t, now - self.t0) print(msg) self.t = now
''' Decorator composition of @property with @functools.lru_cache() ''' return property(functools.lru_cache()(func))
def cached_property(func):
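A short usage sketch for the two helpers above; names and timings are illustrative:

class Dataset:
    @cached_property
    def stats(self):
        # Computed only on first access; later reads hit the lru_cache.
        return sum(range(10 ** 6))

d = Dataset()
chrono = Chrono('timing stats')
_ = d.stats                   # computed
chrono.lap('first access')
_ = d.stats                   # served from cache
chrono.lap('second access')   # should report a much smaller time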
clusterbuilder.go
package clusterBuilder import ( "context" "github.com/google/go-containerregistry/pkg/authn" "github.com/pkg/errors" "k8s.io/apimachinery/pkg/api/equality"
k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/client-go/tools/cache" "knative.dev/pkg/controller" "github.com/pivotal/kpack/pkg/apis/build/v1alpha1" corev1alpha1 "github.com/pivotal/kpack/pkg/apis/core/v1alpha1" "github.com/pivotal/kpack/pkg/client/clientset/versioned" v1alpha1informers "github.com/pivotal/kpack/pkg/client/informers/externalversions/build/v1alpha1" v1alpha1Listers "github.com/pivotal/kpack/pkg/client/listers/build/v1alpha1" "github.com/pivotal/kpack/pkg/reconciler" "github.com/pivotal/kpack/pkg/registry" "github.com/pivotal/kpack/pkg/tracker" ) const ( ReconcilerName = "ClusterBuilders" Kind = "ClusterBuilder" ) type BuilderCreator interface { CreateBuilder(keychain authn.Keychain, clusterStore *v1alpha1.ClusterStore, clusterStack *v1alpha1.ClusterStack, spec v1alpha1.BuilderSpec) (v1alpha1.BuilderRecord, error) } func NewController( opt reconciler.Options, clusterBuilderInformer v1alpha1informers.ClusterBuilderInformer, builderCreator BuilderCreator, keychainFactory registry.KeychainFactory, clusterStoreInformer v1alpha1informers.ClusterStoreInformer, clusterStackInformer v1alpha1informers.ClusterStackInformer, ) (*controller.Impl, func()) { c := &Reconciler{ Client: opt.Client, ClusterBuilderLister: clusterBuilderInformer.Lister(), BuilderCreator: builderCreator, KeychainFactory: keychainFactory, ClusterStoreLister: clusterStoreInformer.Lister(), ClusterStackLister: clusterStackInformer.Lister(), } impl := controller.NewImpl(c, opt.Logger, ReconcilerName) clusterBuilderInformer.Informer().AddEventHandler(reconciler.Handler(impl.Enqueue)) c.Tracker = tracker.New(impl.EnqueueKey, opt.TrackerResyncPeriod()) clusterStoreInformer.Informer().AddEventHandler(reconciler.Handler(c.Tracker.OnChanged)) clusterStackInformer.Informer().AddEventHandler(reconciler.Handler(c.Tracker.OnChanged)) return impl, func() { impl.GlobalResync(clusterBuilderInformer.Informer()) } } type Reconciler struct { Client versioned.Interface ClusterBuilderLister v1alpha1Listers.ClusterBuilderLister BuilderCreator BuilderCreator KeychainFactory registry.KeychainFactory Tracker reconciler.Tracker ClusterStoreLister v1alpha1Listers.ClusterStoreLister ClusterStackLister v1alpha1Listers.ClusterStackLister } func (c *Reconciler) Reconcile(ctx context.Context, key string) error { _, builderName, err := cache.SplitMetaNamespaceKey(key) if err != nil { return err } builder, err := c.ClusterBuilderLister.Get(builderName) if k8serrors.IsNotFound(err) { return nil } else if err != nil { return err } builder = builder.DeepCopy() builderRecord, creationError := c.reconcileBuilder(builder) if creationError != nil { builder.Status.ErrorCreate(creationError) err := c.updateStatus(builder) if err != nil { return err } return controller.NewPermanentError(creationError) } builder.Status.BuilderRecord(builderRecord) return c.updateStatus(builder) } func (c *Reconciler) reconcileBuilder(builder *v1alpha1.ClusterBuilder) (v1alpha1.BuilderRecord, error) { clusterStore, err := c.ClusterStoreLister.Get(builder.Spec.Store.Name) if err != nil { return v1alpha1.BuilderRecord{}, err } err = c.Tracker.Track(clusterStore, builder.NamespacedName()) if err != nil { return v1alpha1.BuilderRecord{}, err } clusterStack, err := c.ClusterStackLister.Get(builder.Spec.Stack.Name) if err != nil { return v1alpha1.BuilderRecord{}, err } err = c.Tracker.Track(clusterStack, builder.NamespacedName()) if err != nil { return v1alpha1.BuilderRecord{}, err } if !clusterStack.Status.GetCondition(corev1alpha1.ConditionReady).IsTrue() { return 
v1alpha1.BuilderRecord{}, errors.Errorf("stack %s is not ready", clusterStack.Name) } keychain, err := c.KeychainFactory.KeychainForSecretRef(registry.SecretRef{ ServiceAccount: builder.Spec.ServiceAccountRef.Name, Namespace: builder.Spec.ServiceAccountRef.Namespace, }) if err != nil { return v1alpha1.BuilderRecord{}, err } return c.BuilderCreator.CreateBuilder(keychain, clusterStore, clusterStack, builder.Spec.BuilderSpec) } func (c *Reconciler) updateStatus(desired *v1alpha1.ClusterBuilder) error { desired.Status.ObservedGeneration = desired.Generation original, err := c.ClusterBuilderLister.Get(desired.Name) if err != nil { return err } if equality.Semantic.DeepEqual(desired.Status, original.Status) { return nil } _, err = c.Client.KpackV1alpha1().ClusterBuilders().UpdateStatus(desired) return err }
register.go
// pmm-admin // Copyright 2019 Percona LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package management import ( "fmt" "os" "strings" "github.com/AlekSi/pointer" "github.com/percona/pmm/api/managementpb/json/client" "github.com/percona/pmm/api/managementpb/json/client/node" "github.com/percona/pmm/utils/nodeinfo" "gopkg.in/alecthomas/kingpin.v2" "github.com/percona/pmm-admin/commands" ) var registerResultT = commands.ParseTemplate(` pmm-agent registered. pmm-agent ID: {{ .PMMAgent.AgentID }} Node ID : {{ .PMMAgent.RunsOnNodeID }} `) type registerResult struct { GenericNode *node.RegisterNodeOKBodyGenericNode `json:"generic_node"` ContainerNode *node.RegisterNodeOKBodyContainerNode `json:"container_node"` PMMAgent *node.RegisterNodeOKBodyPMMAgent `json:"pmm_agent"` } func (res *registerResult) Result() {} func (res *registerResult) String() string { return commands.RenderTemplate(registerResultT, res) } type registerCommand struct { NodeType string NodeName string MachineID string Distro string ContainerID string ContainerName string NodeModel string Region string Az string CustomLabels string Address string MetricsMode string DisableCollectors string Force bool } func (cmd *registerCommand) Run() (commands.Result, error) { customLabels, err := commands.ParseCustomLabels(cmd.CustomLabels) if err != nil { return nil, err } params := &node.RegisterNodeParams{ Body: node.RegisterNodeBody{ NodeType: pointer.ToString(allNodeTypes[cmd.NodeType]), NodeName: cmd.NodeName, MachineID: cmd.MachineID, Distro: cmd.Distro, ContainerID: cmd.ContainerID, ContainerName: cmd.ContainerName, NodeModel: cmd.NodeModel, Region: cmd.Region, Az: cmd.Az, CustomLabels: customLabels, Address: cmd.Address, Reregister: cmd.Force, MetricsMode: pointer.ToString(strings.ToUpper(cmd.MetricsMode)), DisableCollectors: commands.ParseDisableCollectors(cmd.DisableCollectors), }, Context: commands.Ctx, } resp, err := client.Default.Node.RegisterNode(params) if err != nil { return nil, err } return &registerResult{ GenericNode: resp.Payload.GenericNode, ContainerNode: resp.Payload.ContainerNode, PMMAgent: resp.Payload.PMMAgent, }, nil } // register command var ( Register = new(registerCommand) RegisterC = kingpin.Command("register", "Register current Node at PMM Server") ) func init()
{ nodeinfo := nodeinfo.Get() if nodeinfo.PublicAddress == "" { RegisterC.Arg("node-address", "Node address").Required().StringVar(&Register.Address) } else { help := fmt.Sprintf("Node address (autodetected default: %s)", nodeinfo.PublicAddress) RegisterC.Arg("node-address", help).Default(nodeinfo.PublicAddress).StringVar(&Register.Address) } registerNodeTypeKeys := []string{"generic", "container"} // "remote" Node can't be registered with that API nodeTypeDefault := "generic" nodeTypeHelp := fmt.Sprintf("Node type, one of: %s (default: %s)", strings.Join(registerNodeTypeKeys, ", "), nodeTypeDefault) RegisterC.Arg("node-type", nodeTypeHelp).Default(nodeTypeDefault).EnumVar(&Register.NodeType, registerNodeTypeKeys...) hostname, _ := os.Hostname() nodeNameHelp := fmt.Sprintf("Node name (autodetected default: %s)", hostname) RegisterC.Arg("node-name", nodeNameHelp).Default(hostname).StringVar(&Register.NodeName) var defaultMachineID string if nodeinfo.MachineID != "" { defaultMachineID = "/machine_id/" + nodeinfo.MachineID } RegisterC.Flag("machine-id", "Node machine-id (default is autodetected)").Default(defaultMachineID).StringVar(&Register.MachineID) RegisterC.Flag("distro", "Node OS distribution (default is autodetected)").Default(nodeinfo.Distro).StringVar(&Register.Distro) RegisterC.Flag("container-id", "Container ID").StringVar(&Register.ContainerID) RegisterC.Flag("container-name", "Container name").StringVar(&Register.ContainerName) RegisterC.Flag("node-model", "Node model").StringVar(&Register.NodeModel) RegisterC.Flag("region", "Node region").StringVar(&Register.Region) RegisterC.Flag("az", "Node availability zone").StringVar(&Register.Az) RegisterC.Flag("custom-labels", "Custom user-assigned labels").StringVar(&Register.CustomLabels) RegisterC.Flag("force", "Remove Node with that name, with all dependent Services and Agents, if one exists").BoolVar(&Register.Force) RegisterC.Flag("metrics-mode", "Metrics flow mode, can be push - agent will push metrics,"+ " pull - server scrapes metrics from agent, or auto - chosen by server.").Default("auto").EnumVar(&Register.MetricsMode, "auto", "pull", "push") RegisterC.Flag("disable-collectors", "Comma-separated list of collector names to exclude from exporter").StringVar(&Register.DisableCollectors) }
linearreferenceviewmodel.js
'use strict'; /** * @ngdoc service * @name troutDashApp.LinearReferenceViewModel * @description
angular.module('troutDashApp') .factory('LinearReferenceViewModel', function () { var StreamLineViewModel = function (lineSegment, inverseLength) { this.lineSegment = null; this.xOffset = 0.0; this.width = 0.0; this.init(lineSegment, inverseLength); }; StreamLineViewModel.prototype.init = function(lineSegment, inverseLength) { this.lineSegment = lineSegment; var stop = lineSegment.Stop * inverseLength; var start = lineSegment.Start * inverseLength; this.xOffset = 1.0 - stop; this.width = Math.abs(stop - start); }; return StreamLineViewModel; });
* # LinearReferenceViewModel * Factory in the troutDashApp. */
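The init above normalizes a segment's Start/Stop mileposts onto the unit interval; the same arithmetic as a quick sketch (Python used only for illustration, stream length and mileposts invented):

# Mirrors StreamLineViewModel.init for a 250-unit stream.
inverse_length = 1.0 / 250.0
start_milepost, stop_milepost = 50.0, 150.0

stop = stop_milepost * inverse_length     # 0.6
start = start_milepost * inverse_length   # 0.2
x_offset = 1.0 - stop                     # 0.4, offset measured from the far end
width = abs(stop - start)                 # 0.4 of the total stream length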
nearpc.py
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import argparse import codecs import gdb from capstone import * import pwndbg.arguments import pwndbg.color import pwndbg.color.context as C import pwndbg.color.disasm as D import pwndbg.color.nearpc as N import pwndbg.color.theme import pwndbg.commands import pwndbg.config import pwndbg.disasm import pwndbg.functions import pwndbg.ida import pwndbg.memory import pwndbg.regs import pwndbg.strings import pwndbg.symbol import pwndbg.ui import pwndbg.vmmap from pwndbg.color import message def ljust_padding(lst): longest_len = max(map(len, lst)) if lst else 0 return [s.ljust(longest_len) for s in lst] nearpc_branch_marker = pwndbg.color.theme.Parameter('nearpc-branch-marker', ' ↓', 'branch marker line for nearpc command') nearpc_branch_marker_contiguous = pwndbg.color.theme.Parameter('nearpc-branch-marker-contiguous', ' ', 'contiguous branch marker line for nearpc command') pwndbg.color.theme.Parameter('highlight-pc', True, 'whether to highlight the current instruction') pwndbg.color.theme.Parameter('nearpc-prefix', '►', 'prefix marker for nearpc command') pwndbg.config.Parameter('left-pad-disasm', True, 'whether to left-pad disassembly') nearpc_lines = pwndbg.config.Parameter('nearpc-lines', 10, 'number of additional lines to print for the nearpc command') show_args = pwndbg.config.Parameter('nearpc-show-args', True, 'show call arguments below instruction') parser = argparse.ArgumentParser(description='''Disassemble near a specified address.''') parser.add_argument("pc", type=int, nargs="?", default=None, help="Address to disassemble near.") parser.add_argument("lines", type=int, nargs="?", default=None, help="Number of lines to show on either side of the address.") #parser.add_argument("to_string", type=bool, nargs="?", default=False, help="Whether to print it or not.") #TODO make sure this should not be exposed parser.add_argument("emulate", type=bool, nargs="?", default=False, help="Whether to emulate instructions to find the next ones or just linearly disassemble.") @pwndbg.commands.ArgparsedCommand(parser) @pwndbg.commands.OnlyWhenRunning def nearpc(pc=None, lines=None, to_string=False, emulate=False): """ Disassemble near a specified address. """ # Repeating nearpc (pressing enter) makes it show next addresses # (writing nearpc explicitly again will reset its state) if nearpc.repeat: pc = nearpc.next_pc result = [] # Fix the case where we only have one argument, and # it's a small value.
if lines is None and (pc is None or int(pc) < 0x100): lines = pc pc = None if pc is None: pc = pwndbg.regs.pc if lines is None: lines = nearpc_lines // 2 pc = int(pc) lines = int(lines) # Check whether we can even read this address if not pwndbg.memory.peek(pc): result.append(message.error('Invalid address %#x' % pc)) # # Load source data if it's available # pc_to_linenos = collections.defaultdict(lambda: []) # lineno_to_src = {} # frame = gdb.selected_frame() # if frame: # sal = frame.find_sal() # if sal: # symtab = sal.symtab # objfile = symtab.objfile # sourcefilename = symtab.filename # with open(sourcefilename, 'r') as sourcefile: # lineno_to_src = {i:l for i,l in enumerate(sourcefile.readlines())} # for line in symtab.linetable(): # pc_to_linenos[line.pc].append(line.line) instructions = pwndbg.disasm.near(pc, lines, emulate=emulate, show_prev_insns=not nearpc.repeat) if pwndbg.memory.peek(pc) and not instructions: result.append(message.error('Invalid instructions at %#x' % pc)) # In case $pc is in a new map we don't know about, # this will trigger an exploratory search. pwndbg.vmmap.find(pc) # Gather all addresses and symbols for each instruction symbols = [pwndbg.symbol.get(i.address) for i in instructions] addresses = ['%#x' % i.address for i in instructions] nearpc.next_pc = instructions[-1].address + instructions[-1].size if instructions else 0 # Format the symbol name for each instruction symbols = ['<%s> ' % sym if sym else '' for sym in symbols] # Pad out all of the symbols and addresses if pwndbg.config.left_pad_disasm and not nearpc.repeat: symbols = ljust_padding(symbols) addresses = ljust_padding(addresses) prev = None # Print out each instruction for address_str, symbol, instr in zip(addresses, symbols, instructions): asm = D.instruction(instr) prefix_sign = pwndbg.config.nearpc_prefix # Show prefix only on the specified address and don't show it while in repeat-mode show_prefix = instr.address == pc and not nearpc.repeat prefix = ' %s' % (prefix_sign if show_prefix else ' ' * len(prefix_sign)) prefix = N.prefix(prefix) pre = pwndbg.ida.Anterior(instr.address) if pre: result.append(N.ida_anterior(pre)) # Colorize address and symbol if not highlighted # symbol is fetched from gdb and it can be e.g. '<main+8>' if instr.address != pc or not pwndbg.config.highlight_pc or nearpc.repeat: address_str = N.address(address_str) symbol = N.symbol(symbol) elif pwndbg.config.highlight_pc: prefix = C.highlight(prefix) address_str = C.highlight(address_str) symbol = C.highlight(symbol) line = ' '.join((prefix, address_str, symbol, asm)) # If there was a branch before this instruction which was not # contiguous, put in some ellipses. if prev and prev.address + prev.size != instr.address: result.append(N.branch_marker('%s' % nearpc_branch_marker)) # Otherwise if it's a branch and it *is* contiguous, just put # and empty line. elif prev and any(g in prev.groups for g in (CS_GRP_CALL, CS_GRP_JUMP, CS_GRP_RET)): if len('%s' % nearpc_branch_marker_contiguous) > 0: result.append('%s' % nearpc_branch_marker_contiguous) # For syscall instructions, put the name on the side if instr.address == pc: syscall_name = pwndbg.arguments.get_syscall_name(instr) if syscall_name: line += ' <%s>' % N.syscall_name(syscall_name) result.append(line) # For call instructions, attempt to resolve the target and # determine the number of arguments. 
if show_args: result.extend(['%8s%s' % ('', arg) for arg in pwndbg.arguments.format_args(instruction=instr)]) prev = instr if not to_string: print('\n'.join(result)) return result parser = argparse.ArgumentParser(description='''Like nearpc, but will emulate instructions from the current $PC forward.''') parser.add_argument("pc", type=int, nargs="?", default=None, help="Address to emulate near.") parser.add_argument("lines", type=int, nargs="?", default=None, help="Number of lines to show on either side of the address.") @pwndbg.commands.ArgparsedCommand(parser) @pwndbg.commands.OnlyWhenRunning def emulate(pc=None, lines=None, to_string=False, emulate=True): """
mulate_command = emulate parser = argparse.ArgumentParser(description='''Compatibility layer for PEDA's pdisass command.''') parser.add_argument("pc", type=int, nargs="?", default=None, help="Address to disassemble near.") parser.add_argument("lines", type=int, nargs="?", default=None, help="Number of lines to show on either side of the address.") @pwndbg.commands.ArgparsedCommand(parser) @pwndbg.commands.OnlyWhenRunning def pdisass(pc=None, lines=None, to_string=False): """ Compatibility layer for PEDA's pdisass command """ nearpc.repeat = pdisass.repeat return nearpc(pc, lines, to_string, False) nearpc.next_pc = 0
Like nearpc, but will emulate instructions from the current $PC forward. """ nearpc.repeat = emulate_command.repeat return nearpc(pc, lines, to_string, emulate) e
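A small sketch of the ljust_padding helper above, which backs the left-pad-disasm setting:

symbols = ['<main> ', '<main+4> ', '']
padded = ljust_padding(symbols)
# Every entry is padded to the longest symbol so the disassembly columns line up.
assert padded == ['<main>   ', '<main+4> ', ' ' * 9]
# Pressing enter re-runs nearpc from nearpc.next_pc, which is saved after each call.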
settings.rs
pub const PI: f32 = 3.14159265359; pub const MAX_MANIFOLD_POINTS: usize = 2; pub const MAX_POLYGON_VERTICES: usize = 8;
pub const POLYGON_RADIUS: f32 = 2. * LINEAR_SLOP;
pub const LINEAR_SLOP: f32 = 0.005; pub const ANGULAR_SLOP: f32 = 2. / 180. * PI;
DOE_functions.py
#==================== # Essential imports #==================== from pyDOE import * from pyDOE_corrected import * from diversipy import * import pandas as pd import numpy as np # =========================================================================================================== # Function for constructing a DataFrame from a numpy array generated by PyDOE function and individual lists # =========================================================================================================== def construct_df(x,r): df=pd.DataFrame(data=x,dtype='float32') for i in df.index: for j in range(len(list(df.iloc[i]))): df.iloc[i, j]=r[j][int(df.iloc[i, j])] return df # =================================================================================================== # Function for constructing a DataFrame from a matrix with floating point numbers between -1 and +1 # =================================================================================================== def construct_df_from_matrix(x,factor_array): """ This function constructs a DataFrame out of x and factor_array, both of which are assumed to be numpy arrays. It projects the numbers in x (which is the output of a design-of-experiment build) to the factor array ranges. Here factor_array is assumed to have min, mid, and max levels for each factor. Matrix x is assumed to have numbers ranging from -1 to 1. """ row_num=x.shape[0] # Number of rows in the matrix x col_num=x.shape[1] # Number of columns in the matrix x empty=np.zeros((row_num,col_num)) def simple_substitution(idx,factor_list): if idx==-1: return factor_list[0] elif idx==0: return factor_list[1] elif idx==1: return factor_list[2] else: alpha=np.abs(factor_list[2]-factor_list[0])/2 if idx<0: beta=np.abs(idx)-1 return factor_list[0]-(beta*alpha) else: beta=idx-1 return factor_list[2]+(beta*alpha) for i in range(row_num): for j in range(col_num): empty[i,j] = simple_substitution(x[i,j],factor_array[j]) return pd.DataFrame(data=empty) # ================================================================================================= # Function for constructing a DataFrame from a matrix with floating point numbers between 0 and 1 # ================================================================================================= def construct_df_from_random_matrix(x,factor_array): """ This function constructs a DataFrame out of matrix x and factor_array, both of which are assumed to be numpy arrays. It projects the numbers in x (which is the output of a design-of-experiment build) to the factor array ranges. Here factor_array is assumed to have only min and max ranges. Matrix x is assumed to have numbers ranging from 0 to 1 only.
""" row_num=x.shape[0] # Number of rows in the matrix x col_num=x.shape[1] # Number of columns in the matrix x empty=np.zeros((row_num,col_num)) def simple_substitution(idx,factor_list): alpha=np.abs(factor_list[1]-factor_list[0]) beta=idx return factor_list[0]+(beta*alpha) for i in range(row_num): for j in range(col_num): empty[i,j] = simple_substitution(x[i,j],factor_array[j]) return pd.DataFrame(data=empty) # ====================================================================================== # Function for building full factorial DataFrame from a dictionary of process variables # ====================================================================================== def build_full_fact(factor_level_ranges): """ Builds a full factorial design dataframe from a dictionary of factor/level ranges Example of the process variable dictionary: {'Pressure':[50,60,70],'Temperature':[290, 320, 350],'Flow rate':[0.9,1.0]} """ factor_lvl_count=[] factor_lists=[] for key in factor_level_ranges: factor_lvl_count.append(len(factor_level_ranges[key])) factor_lists.append(factor_level_ranges[key]) x = fullfact_corrected(factor_lvl_count) df=construct_df(x,factor_lists) df.columns=factor_level_ranges.keys() return df # ================================================================================================================================================== # Function for building 2-level fractional factorial DataFrame from a dictionary and a generator string # ================================================================================================================================================================ def build_frac_fact(factor_level_ranges,gen_string): """ Builds a fractional factorial design dataframe from a dictionary of factor/level ranges. Only min and max values of the range are required. Example of the dictionary: {'Pressure':[50,70],'Temperature':[290, 350],'Flow rate':[0.9,1.0]} This function requires a little more knowledge of how the confounding will be allowed. (This means that some factor effects get muddled with other interaction effects, so it's harder to distinguish between them.) Let's assume that we just can't afford (for whatever reason) the number of runs in a full-factorial design. We can systematically decide on a fraction of the full-factorial by allowing some of the factor main effects to be confounded with other factor interaction effects. This is done by defining an alias structure that defines, symbolically, these interactions. These alias structures are written like “C = AB” or “I = ABC”, or “AB = CD”, etc. These define how one column is related to the others. EXAMPLE ------------ For example, the alias “C = AB” or “I = ABC” indicates that there are three factors (A, B, and C) and that the main effect of factor C is confounded with the interaction effect of the product AB, and by extension, A is confounded with BC and B is confounded with AC. A full-factorial design with these three factors results in a design matrix with 8 runs, but we will assume that we can only afford 4 of those runs. To create this fractional design, we need a matrix with three columns, one for A, B, and C, only now the levels in the C column are created by the product of the A and B columns. """ factor_count=len(factor_level_ranges) factor_lists=[] for key in factor_level_ranges: if len(factor_level_ranges[key])!=2: factor_level_ranges[key][1]=factor_level_ranges[key][-1] factor_level_ranges[key]=factor_level_ranges[key][:2] print(f"{key} had more than two levels. 
Assigning the end point to the high level.") if factor_count!=len(gen_string.split(' ')): print("Length of the generator string for the fractional factorial build does not match the length of the process variables dictionary") return None for key in factor_level_ranges: factor_lists.append(factor_level_ranges[key]) x = fracfact(gen_string) def index_change(x): if x==-1: return 0 else: return x vfunc=np.vectorize(index_change) x=vfunc(x) df=construct_df(x,factor_lists) df.columns=factor_level_ranges.keys() return df # ===================================================================================== # Function for building Plackett-Burman designs from a dictionary of process variables # ===================================================================================== def build_plackett_burman(factor_level_ranges): """ Builds a Plackett-Burman dataframe from a dictionary of factor/level ranges. Only min and max values of the range are required. Example of the dictionary: {'Pressure':[50,70],'Temperature':[290, 350],'Flow rate':[0.9,1.0]} Plackett–Burman designs are experimental designs presented in 1946 by Robin L. Plackett and J. P. Burman while working in the British Ministry of Supply.(Their goal was to find experimental designs for investigating the dependence of some measured quantity on a number of independent variables (factors), each taking L levels, in such a way as to minimize the variance of the estimates of these dependencies using a limited number of experiments. Interactions between the factors were considered negligible. The solution to this problem is to find an experimental design where each combination of levels for any pair of factors appears the same number of times, throughout all the experimental runs (refer to table). A complete factorial design would satisfy this criterion, but the idea was to find smaller designs. These designs are unique in that the number of trial conditions (rows) expands by multiples of four (e.g. 4, 8, 12, etc.). The max number of columns allowed before a design increases the number of rows is always one less than the next higher multiple of four. """ for key in factor_level_ranges: if len(factor_level_ranges[key])!=2: factor_level_ranges[key][1]=factor_level_ranges[key][-1] factor_level_ranges[key]=factor_level_ranges[key][:2] print(f"{key} had more than two levels. Assigning the end point to the high level.") factor_count=len(factor_level_ranges) factor_lists=[] for key in factor_level_ranges: factor_lists.append(factor_level_ranges[key]) x = pbdesign(factor_count) def index_change(x): if x==-1: return 0 else: return x vfunc=np.vectorize(index_change) x=vfunc(x) df=construct_df(x,factor_lists) df.columns=factor_level_ranges.keys() return df # =================================================================================== # Function for building Sukharev Grid designs from a dictionary of process variables # =================================================================================== def build_sukharev(factor_level_ranges,num_samples=None): """ Builds a Sukharev-grid hypercube design dataframe from a dictionary of factor/level ranges. Number of samples raised to the power of (1/dimension), where dimension is the number of variables, must be an integer. Only min and max values of the range are required. 
Example of the dictionary: {'Pressure':[50,70],'Temperature':[290, 350],'Flow rate':[0.9,1.0]} num_samples: Number of samples to be generated Special property of this grid is that points are not placed on the boundaries of the hypercube, but at centroids of the subcells constituted by individual samples. This design offers optimal results for the covering radius regarding distances based on the max-norm. """ for key in factor_level_ranges: if len(factor_level_ranges[key])!=2: factor_level_ranges[key][1]=factor_level_ranges[key][-1] factor_level_ranges[key]=factor_level_ranges[key][:2] print(f"{key} had more than two levels. Assigning the end point to the high level.") factor_count=len(factor_level_ranges) factor_lists=[] for key in factor_level_ranges: factor_lists.append(factor_level_ranges[key]) check=num_samples**((1/factor_count)) if (check-int(check)>1e-5): num_samples=(int(check)+1)**(factor_count) print("\nNumber of samples not adequate to fill a Sukharev grid. Increasing sample size to: ",num_samples) x = sukharev_grid(num_points=num_samples,dimension=factor_count) factor_lists=np.array(factor_lists) df = construct_df_from_random_matrix(x,factor_lists) df.columns=factor_level_ranges.keys() return df # =================================================================================== # Function for building Box-Behnken designs from a dictionary of process variables # =================================================================================== def build_box_behnken(factor_level_ranges,center=1): """ Builds a Box-Behnken design dataframe from a dictionary of factor/level ranges. Note 3 levels of factors are necessary. If not given, the function will automatically create 3 levels by linear mid-section method. Example of the dictionary: {'Pressure':[50,60,70],'Temperature':[290, 320, 350],'Flow rate':[0.9,1.0,1.1]} In statistics, Box–Behnken designs are experimental designs for response surface methodology, devised by George E. P. Box and Donald Behnken in 1960, to achieve the following goals: * Each factor, or independent variable, is placed at one of three equally spaced values, usually coded as −1, 0, +1. (At least three levels are needed for the following goal.) * The design should be sufficient to fit a quadratic model, that is, one containing squared terms, products of two factors, linear terms and an intercept. * The ratio of the number of experimental points to the number of coefficients in the quadratic model should be reasonable (in fact, their designs kept it in the range of 1.5 to 2.6).*estimation variance should more or less depend only on the distance from the centre (this is achieved exactly for the designs with 4 and 7 factors), and should not vary too much inside the smallest (hyper)cube containing the experimental points. """ for key in factor_level_ranges: if len(factor_level_ranges[key])==2: factor_level_ranges[key].append((factor_level_ranges[key][0]+factor_level_ranges[key][1])/2) factor_level_ranges[key].sort() print(f"{key} had only two end points. 
Creating a mid-point by averaging them") factor_count=len(factor_level_ranges) factor_lists=[] for key in factor_level_ranges: factor_lists.append(factor_level_ranges[key]) x = bbdesign_corrected(factor_count,center=center) x=x+1 #Adjusting the index up by 1 df=construct_df(x,factor_lists) df.columns=factor_level_ranges.keys() return df # ===================================================================================================== # Function for building central-composite (Box-Wilson) designs from a dictionary of process variables # ===================================================================================================== def build_central_composite(factor_level_ranges,center=(2,2),alpha='o',face='ccc'): """ Builds a central-composite design dataframe from a dictionary of factor/level ranges. Only min and max values of the range are required. Example of the dictionary: {'Pressure':[50,70],'Temperature':[290, 350],'Flow rate':[0.9,1.0]} In statistics, a central composite design is an experimental design, useful in response surface methodology, for building a second order (quadratic) model for the response variable without needing to use a complete three-level factorial experiment. The design consists of three distinct sets of experimental runs: * A factorial (perhaps fractional) design in the factors studied, each having two levels; * A set of center points, experimental runs whose values of each factor are the medians of the values used in the factorial portion. This point is often replicated in order to improve the precision of the experiment; * A set of axial points, experimental runs identical to the centre points except for one factor, which will take on values both below and above the median of the two factorial levels, and typically both outside their range. All factors are varied in this way. """ for key in factor_level_ranges: if len(factor_level_ranges[key])!=2: factor_level_ranges[key][1]=factor_level_ranges[key][-1] factor_level_ranges[key]=factor_level_ranges[key][:2] print(f"{key} had more than two levels. Assigning the end point to the high level.") # Creates the mid-points by averaging the low and high levels for key in factor_level_ranges: if len(factor_level_ranges[key])==2: factor_level_ranges[key].append((factor_level_ranges[key][0]+factor_level_ranges[key][1])/2) factor_level_ranges[key].sort() factor_count=len(factor_level_ranges) factor_lists=[] for key in factor_level_ranges: factor_lists.append(factor_level_ranges[key]) x = ccdesign(factor_count,center=center,alpha=alpha,face=face) factor_lists=np.array(factor_lists) df = construct_df_from_matrix(x,factor_lists) df.columns=factor_level_ranges.keys() return df # ==================================================================================== # Function for building simple Latin Hypercube from a dictionary of process variables # ==================================================================================== def build_lhs(factor_level_ranges, num_samples=None, prob_distribution=None): """ Builds a Latin Hypercube design dataframe from a dictionary of factor/level ranges. Only min and max values of the range are required. Example of the dictionary: {'Pressure':[50,70],'Temperature':[290, 350],'Flow rate':[0.9,1.0]} num_samples: Number of samples to be generated prob_distribution: Analytical probability distribution to be applied over the randomized sampling. 
Takes strings like: 'Normal', 'Poisson', 'Exponential', 'Beta', 'Gamma' Latin hypercube sampling (LHS) is a form of stratified sampling that can be applied to multiple variables. The method commonly used to reduce the number or runs necessary for a Monte Carlo simulation to achieve a reasonably accurate random distribution. LHS can be incorporated into an existing Monte Carlo model fairly easily, and work with variables following any analytical probability distribution. """ for key in factor_level_ranges: if len(factor_level_ranges[key])!=2: factor_level_ranges[key][1]=factor_level_ranges[key][-1] factor_level_ranges[key]=factor_level_ranges[key][:2] print(f"{key} had more than two levels. Assigning the end point to the high level.") factor_count=len(factor_level_ranges) factor_lists=[] if num_samples==None: num_samples=factor_count for key in factor_level_ranges: factor_lists.append(factor_level_ranges[key]) x = lhs(n=factor_count,samples=num_samples) factor_lists=np.array(factor_lists) df = construct_df_from_random_matrix(x,factor_lists) df.columns=factor_level_ranges.keys() return df # ============================================================================================ # Function for building space-filling Latin Hypercube from a dictionary of process variables # ============================================================================================ def build_space_filling_lhs(factor_level_ranges, num_samples=None): """ Builds a space-filling Latin Hypercube design dataframe from a dictionary of factor/level ranges. Only min and max values of the range are required. Example of the dictionary: {'Pressure':[50,70],'Temperature':[290, 350],'Flow rate':[0.9,1.0]} num_samples: Number of samples to be generated """ for key in factor_level_ranges: if len(factor_level_ranges[key])!=2: factor_level_ranges[key][1]=factor_level_ranges[key][-1] factor_level_ranges[key]=factor_level_ranges[key][:2] print(f"{key} had more than two levels. Assigning the end point to the high level.") factor_count=len(factor_level_ranges) factor_lists=[] if num_samples==None: num_samples=factor_count for key in factor_level_ranges: factor_lists.append(factor_level_ranges[key]) x = transform_spread_out(lhd_matrix(num_points=num_samples,dimension=factor_count)) # create latin hypercube design factor_lists=np.array(factor_lists) df = construct_df_from_random_matrix(x,factor_lists) df.columns=factor_level_ranges.keys() return df # ===================================================================================================== # Function for building designs with random _k-means_ clusters from a dictionary of process variables # ===================================================================================================== def build_random_k_means(factor_leve
s=None): """ This function aims to produce a centroidal Voronoi tesselation of the unit random hypercube and generate k-means clusters. Only min and max values of the range are required. Example of the dictionary: {'Pressure':[50,70],'Temperature':[290, 350],'Flow rate':[0.9,1.0]} num_samples: Number of samples to be generated """ for key in factor_level_ranges: if len(factor_level_ranges[key])!=2: factor_level_ranges[key][1]=factor_level_ranges[key][-1] factor_level_ranges[key]=factor_level_ranges[key][:2] print(f"{key} had more than two levels. Assigning the end point to the high level.") factor_count=len(factor_level_ranges) factor_lists=[] if num_samples==None: num_samples=factor_count for key in factor_level_ranges: factor_lists.append(factor_level_ranges[key]) x = random_k_means(num_points=num_samples,dimension=factor_count) # create latin hypercube design factor_lists=np.array(factor_lists) df = construct_df_from_random_matrix(x,factor_lists) df.columns=factor_level_ranges.keys() return df # ============================================================================================= # Function for building maximin reconstruction matrix from a dictionary of process variables # ============================================================================================= def build_maximin(factor_level_ranges, num_samples=None): """ Builds a maximin reconstructed design dataframe from a dictionary of factor/level ranges. Only min and max values of the range are required. Example of the dictionary: {'Pressure':[50,70],'Temperature':[290, 350],'Flow rate':[0.9,1.0]} num_samples: Number of samples to be generated This algorithm carries out a user-specified number of iterations to maximize the minimal distance of a point in the set to * other points in the set, * existing (fixed) points, * the boundary of the hypercube. """ for key in factor_level_ranges: if len(factor_level_ranges[key])!=2: factor_level_ranges[key][1]=factor_level_ranges[key][-1] factor_level_ranges[key]=factor_level_ranges[key][:2] print(f"{key} had more than two levels. Assigning the end point to the high level.") factor_count=len(factor_level_ranges) factor_lists=[] if num_samples==None: num_samples=factor_count for key in factor_level_ranges: factor_lists.append(factor_level_ranges[key]) x = maximin_reconstruction(num_points=num_samples,dimension=factor_count) # create latin hypercube design factor_lists=np.array(factor_lists) df = construct_df_from_random_matrix(x,factor_lists) df.columns=factor_level_ranges.keys() return df # ======================================================================================== # Function for building Halton matrix based design from a dictionary of process variables # ======================================================================================== def build_halton(factor_level_ranges, num_samples=None): """ Builds a quasirandom dataframe from a dictionary of factor/level ranges using prime numbers as seed. Only min and max values of the range are required. Example of the dictionary: {'Pressure':[50,70],'Temperature':[290, 350],'Flow rate':[0.9,1.0]} num_samples: Number of samples to be generated Quasirandom sequence using the default initialization with first n prime numbers equal to the number of factors/variables. """ for key in factor_level_ranges: if len(factor_level_ranges[key])!=2: factor_level_ranges[key][1]=factor_level_ranges[key][-1] factor_level_ranges[key]=factor_level_ranges[key][:2] print(f"{key} had more than two levels. 
Assigning the end point to the high level.")

	factor_count = len(factor_level_ranges)
	factor_lists = []

	if num_samples is None:
		num_samples = factor_count

	for key in factor_level_ranges:
		factor_lists.append(factor_level_ranges[key])

	x = halton(num_points=num_samples, dimension=factor_count)  # create Halton matrix design
	factor_lists = np.array(factor_lists)
	df = construct_df_from_random_matrix(x, factor_lists)
	df.columns = factor_level_ranges.keys()
	return df

# ==========================================================================================
# Function for building uniform random design matrix from a dictionary of process variables
# ==========================================================================================

def build_uniform_random(factor_level_ranges, num_samples=None):
	"""
	Builds a design dataframe with samples drawn from a uniform random distribution, based on a dictionary of factor/level ranges.
	Only min and max values of the range are required.
	Example of the dictionary:
	{'Pressure':[50,70],'Temperature':[290, 350],'Flow rate':[0.9,1.0]}
	num_samples: Number of samples to be generated
	"""
	for key in factor_level_ranges:
		if len(factor_level_ranges[key]) != 2:
			factor_level_ranges[key][1] = factor_level_ranges[key][-1]
			factor_level_ranges[key] = factor_level_ranges[key][:2]
			print(f"{key} had more than two levels. Assigning the end point to the high level.")

	factor_count = len(factor_level_ranges)
	factor_lists = []

	if num_samples is None:
		num_samples = factor_count

	for key in factor_level_ranges:
		factor_lists.append(factor_level_ranges[key])

	x = random_uniform(num_points=num_samples, dimension=factor_count)  # create uniform random design
	factor_lists = np.array(factor_lists)
	df = construct_df_from_random_matrix(x, factor_lists)
	df.columns = factor_level_ranges.keys()
	return df
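
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the library above): the stratified-sampling
# idea behind LHS, written directly with NumPy (reuses this module's `np`
# import). It is a simplified stand-in for the pyDOE-style `lhs(n, samples)`
# call used in build_lhs; the helper name `_lhs_sketch` is hypothetical.
# ---------------------------------------------------------------------------
def _lhs_sketch(n, samples):
	"""Return a (samples x n) Latin hypercube sample of the unit hypercube."""
	cut = np.linspace(0, 1, samples + 1)        # equal-probability stratum edges
	jitter = np.random.rand(samples, n)         # random position inside each stratum
	points = cut[:-1, None] + jitter / samples  # one point per stratum, per column
	for j in range(n):                          # shuffle the strata independently per column
		np.random.shuffle(points[:, j])
	return points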
l_ranges, num_sample
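
# Usage sketch (hypothetical session): assumes the module above is importable
# together with its dependencies (pyDOE's `lhs`, plus the diversipy-style
# helpers `transform_spread_out`, `lhd_matrix`, `random_k_means`,
# `maximin_reconstruction`, `halton`, `random_uniform`).
#
#   factors = {'Pressure': [50, 70],
#              'Temperature': [290, 350],
#              'Flow rate': [0.9, 1.0]}
#
#   df_lhs    = build_lhs(factors, num_samples=20)
#   df_filled = build_space_filling_lhs(factors, num_samples=20)
#   df_halton = build_halton(factors, num_samples=20)
#
# Each builder returns a pandas DataFrame with one column per factor, with the
# generated unit-hypercube matrix scaled onto each factor's [min, max] range
# by construct_df_from_random_matrix.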
check.go
package kvstore

type Checker interface {
	CheckExist(key string) error
	CheckType(key string, typeName string) error
}

var _ Checker = (*store)(nil)

// CheckExist checks whether the key exists; it returns an error if the key
// does not exist or an intermediate error occurs.
func (s *store) CheckExist(key string) error {
	s.lock.RLock()
	defer s.lock.RUnlock()
	return s.checkExist(key)
}

func (s *store) checkExist(key string) error {
	_, err := s.readValue(key)
	// Do not check whether value is nil here:
	// if the key does not exist, readValue always returns an error,
	// while a key mapped to a None value returns no error but a nil value.
	return err
}

// CheckType reports whether the type of the value stored at key is valid.
// Supported type names include those returned by TypeName,
// as well as "number", which stands for "int" or "float".
func (s *store) CheckType(key string, typeName string) error {
	s.lock.RLock()
	defer s.lock.RUnlock()
	return s.checkType(key, typeName)
}

func (s *store) checkType(key string, typeName string) error {
	v, err := s.readValue(key)
	if err != nil {
		return err
	}
	expectName := TypeName(v)
	if expectName == typeName {
		return nil
	}
	if typeName == "any" || typeName == "" {
		return nil
	}
	switch expectName {
	case "int", "float":
		if typeName == "number" {
			return nil
		}
		return ErrKeyTypeNotMatch{
			Key:    key,
			Expect: typeName,
			Got:    expectName + " (number)",
		}
	case "list", "map":
		// TODO
	}
	return ErrKeyTypeNotMatch{
		Key:    key,
		Expect: typeName,
		Got:    expectName,
	}
}

// TypeNameCompare checks the containment relationship between two type names.
// It returns 0 if they are identical, -1 if the former covers a smaller range,
// 1 if the latter covers a smaller range,
// and -2 if the check fails (the two have no overlap).
func TypeNameCompare(previous, current string) int {
	if previous == current {
		return 0
	}
	if previous == "any" {
		// previous > current
		return 1
	}
	if current == "any" {
		// current > previous
		return -1
	}
	if previous == "number" && (current == "int"
"float") {
		// previous > current
		return 1
	}
	if current == "number" && (previous == "int" || previous == "float") {
		// current > previous
		return -1
	}
	return -2 // unrelated: no overlap
}
|| current ==
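
// Illustration (not part of the package): expected results of TypeNameCompare:
//
//	TypeNameCompare("int", "int")    == 0  // identical
//	TypeNameCompare("any", "int")    == 1  // the latter is narrower
//	TypeNameCompare("int", "number") == -1 // the former is narrower
//	TypeNameCompare("int", "string") == -2 // no overlap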
cpp_lint.py
#!/usr/bin/python2 # # Copyright (c) 2009 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Does google-lint on c++ files. The goal of this script is to identify places in the code that *may* be in non-compliance with google style. It does not attempt to fix up these problems -- the point is to educate. It does also not attempt to find all problems, or to ensure that everything it does find is legitimately a problem. In particular, we can get very confused by /* and // inside strings! We do a small hack, which is to ignore //'s with "'s after them on the same line, but it is far from perfect (in either direction). """ import codecs import copy import getopt import math # for log import os import re import sre_compile import string import sys import unicodedata _USAGE = """ Syntax: cpp_lint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...] [--counting=total|toplevel|detailed] [--root=subdir] [--linelength=digits] <file> [file] ... The style guidelines this tries to follow are those in http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml Every problem is given a confidence score from 1-5, with 5 meaning we are certain of the problem, and 1 meaning it could be a legitimate construct. This will miss some errors, and is not a substitute for a code review. To suppress false-positive errors of a certain category, add a 'NOLINT(category)' comment to the line. NOLINT or NOLINT(*) suppresses errors of all categories on that line. The files passed in will be linted; at least one file must be provided. Default linted extensions are .cc, .cpp, .cu, .cuh and .h. Change the extensions with the --extensions flag. Flags: output=vs7 By default, the output is formatted to ease emacs parsing. Visual Studio compatible output (vs7) may also be used. Other formats are unsupported. verbose=# Specify a number 0-5 to restrict errors to certain verbosity levels. filter=-x,+y,... Specify a comma-separated list of category-filters to apply: only error messages whose category names pass the filters will be printed. 
      (Category names are printed with the message and look like
      "[whitespace/indent]".)  Filters are evaluated left to right.
      "-FOO" and "FOO" mean "do not print categories that start with FOO".
      "+FOO" means "do print categories that start with FOO".

      Examples: --filter=-whitespace,+whitespace/braces
                --filter=whitespace,runtime/printf,+runtime/printf_format
                --filter=-,+build/include_what_you_use

      To see a list of all the categories used in cpplint, pass no arg:
         --filter=

    counting=total|toplevel|detailed
      The total number of errors found is always printed. If
      'toplevel' is provided, then the count of errors in each of
      the top-level categories like 'build' and 'whitespace' will
      also be printed. If 'detailed' is provided, then a count
      is provided for each category like 'build/class'.

    root=subdir
      The root directory used for deriving header guard CPP variable.
      By default, the header guard CPP variable is calculated as the
      relative path to the directory that contains .git, .hg, or .svn.
      When this flag is specified, the relative path is calculated from the
      specified directory. If the specified directory does not exist, this
      flag is ignored.

      Examples:
        Assuming that src/.git exists, the header guard CPP variables for
        src/chrome/browser/ui/browser.h are:

        No flag => CHROME_BROWSER_UI_BROWSER_H_
        --root=chrome => BROWSER_UI_BROWSER_H_
        --root=chrome/browser => UI_BROWSER_H_

    linelength=digits
      This is the allowed line length for the project. The default value is
      80 characters.

      Examples:
        --linelength=120

    extensions=extension,extension,...
      The allowed file extensions that cpplint will check

      Examples:
        --extensions=hpp,cpp
"""

# We categorize each error message we print.  Here are the categories.
# We want an explicit list so we can list them all in cpplint --filter=.
# If you add a new error message with a new category, add it to the list
# here!  cpplint_unittest.py should tell you if you forget to do this.
_ERROR_CATEGORIES = [
  'build/class',
  'build/deprecated',
  'build/endif_comment',
  'build/explicit_make_pair',
  'build/forward_decl',
  'build/header_guard',
  'build/include',
  'build/include_alpha',
  'build/include_dir',
  'build/include_order',
  'build/include_what_you_use',
  'build/namespaces',
  'build/printf_format',
  'build/storage_class',
  'caffe/alt_fn',
  'caffe/data_layer_setup',
  'caffe/random_fn',
  'legal/copyright',
  'readability/alt_tokens',
  'readability/braces',
  'readability/casting',
  'readability/check',
  'readability/constructors',
  'readability/fn_size',
  'readability/function',
  'readability/multiline_comment',
  'readability/multiline_string',
  'readability/namespace',
  'readability/nolint',
  'readability/nul',
  'readability/streams',
  'readability/todo',
  'readability/utf8',
  'runtime/arrays',
  'runtime/casting',
  'runtime/explicit',
  'runtime/int',
  'runtime/init',
  'runtime/invalid_increment',
  'runtime/member_string_references',
  'runtime/memset',
  'runtime/operator',
  'runtime/printf',
  'runtime/printf_format',
  'runtime/references',
  'runtime/string',
  'runtime/threadsafe_fn',
  'runtime/vlog',
  'whitespace/blank_line',
  'whitespace/braces',
  'whitespace/comma',
  'whitespace/comments',
  'whitespace/empty_conditional_body',
  'whitespace/empty_loop_body',
  'whitespace/end_of_line',
  'whitespace/ending_newline',
  'whitespace/forcolon',
  'whitespace/indent',
  'whitespace/line_length',
  'whitespace/newline',
  'whitespace/operators',
  'whitespace/parens',
  'whitespace/semicolon',
  'whitespace/tab',
  'whitespace/todo'
  ]

# The default state of the category filter. This is overridden by the --filter=
# flag. 
By default all errors are on, so only add here categories that should be # off by default (i.e., categories that must be enabled by the --filter= flags). # All entries here should start with a '-' or '+', as in the --filter= flag. _DEFAULT_FILTERS = [ '-build/include_dir', '-readability/todo', ] # We used to check for high-bit characters, but after much discussion we # decided those were OK, as long as they were in UTF-8 and didn't represent # hard-coded international strings, which belong in a separate i18n file. # C++ headers _CPP_HEADERS = frozenset([ # Legacy 'algobase.h', 'algo.h', 'alloc.h', 'builtinbuf.h', 'bvector.h', 'complex.h', 'defalloc.h', 'deque.h', 'editbuf.h', 'fstream.h', 'function.h', 'hash_map', 'hash_map.h', 'hash_set', 'hash_set.h', 'hashtable.h', 'heap.h', 'indstream.h', 'iomanip.h', 'iostream.h', 'istream.h', 'iterator.h', 'list.h', 'map.h', 'multimap.h', 'multiset.h', 'ostream.h', 'pair.h', 'parsestream.h', 'pfstream.h', 'procbuf.h', 'pthread_alloc', 'pthread_alloc.h', 'rope', 'rope.h', 'ropeimpl.h', 'set.h', 'slist', 'slist.h', 'stack.h', 'stdiostream.h', 'stl_alloc.h', 'stl_relops.h', 'streambuf.h', 'stream.h', 'strfile.h', 'strstream.h', 'tempbuf.h', 'tree.h', 'type_traits.h', 'vector.h', # 17.6.1.2 C++ library headers 'algorithm', 'array', 'atomic', 'bitset', 'chrono', 'codecvt', 'complex', 'condition_variable', 'deque', 'exception', 'forward_list', 'fstream', 'functional', 'future', 'initializer_list', 'iomanip', 'ios', 'iosfwd', 'iostream', 'istream', 'iterator', 'limits', 'list', 'locale', 'map', 'memory', 'mutex', 'new', 'numeric', 'ostream', 'queue', 'random', 'ratio', 'regex', 'set', 'sstream', 'stack', 'stdexcept', 'streambuf', 'string', 'strstream', 'system_error', 'thread', 'tuple', 'typeindex', 'typeinfo', 'type_traits', 'unordered_map', 'unordered_set', 'utility', 'valarray', 'vector', # 17.6.1.2 C++ headers for C library facilities 'cassert', 'ccomplex', 'cctype', 'cerrno', 'cfenv', 'cfloat', 'cinttypes', 'ciso646', 'climits', 'clocale', 'cmath', 'csetjmp', 'csignal', 'cstdalign', 'cstdarg', 'cstdbool', 'cstddef', 'cstdint', 'cstdio', 'cstdlib', 'cstring', 'ctgmath', 'ctime', 'cuchar', 'cwchar', 'cwctype', ]) # Assertion macros. These are defined in base/logging.h and # testing/base/gunit.h. Note that the _M versions need to come first # for substring matching to work. 
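# For example, the tables built below let cpplint suggest the dedicated
# two-argument form for a comparison written inside one of these macros
# (illustrative):
#   CHECK(x == y)        ->  suggest CHECK_EQ(x, y)
#   EXPECT_FALSE(a < b)  ->  suggest EXPECT_GE(a, b)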
_CHECK_MACROS = [ 'DCHECK', 'CHECK', 'EXPECT_TRUE_M', 'EXPECT_TRUE', 'ASSERT_TRUE_M', 'ASSERT_TRUE', 'EXPECT_FALSE_M', 'EXPECT_FALSE', 'ASSERT_FALSE_M', 'ASSERT_FALSE', ] # Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE _CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS]) for op, replacement in [('==', 'EQ'), ('!=', 'NE'), ('>=', 'GE'), ('>', 'GT'), ('<=', 'LE'), ('<', 'LT')]: _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement _CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement _CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'), ('>=', 'LT'), ('>', 'LE'), ('<=', 'GT'), ('<', 'GE')]: _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement _CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement _CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement # Alternative tokens and their replacements. For full list, see section 2.5 # Alternative tokens [lex.digraph] in the C++ standard. # # Digraphs (such as '%:') are not included here since it's a mess to # match those on a word boundary. _ALT_TOKEN_REPLACEMENT = { 'and': '&&', 'bitor': '|', 'or': '||', 'xor': '^', 'compl': '~', 'bitand': '&', 'and_eq': '&=', 'or_eq': '|=', 'xor_eq': '^=', 'not': '!', 'not_eq': '!=' } # Compile regular expression that matches all the above keywords. The "[ =()]" # bit is meant to avoid matching these keywords outside of boolean expressions. # # False positives include C-style multi-line comments and multi-line strings # but those have always been troublesome for cpplint. _ALT_TOKEN_REPLACEMENT_PATTERN = re.compile( r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)') # These constants define types of headers for use with # _IncludeState.CheckNextIncludeOrder(). _C_SYS_HEADER = 1 _CPP_SYS_HEADER = 2 _LIKELY_MY_HEADER = 3 _POSSIBLE_MY_HEADER = 4 _OTHER_HEADER = 5 # These constants define the current inline assembly state _NO_ASM = 0 # Outside of inline assembly block _INSIDE_ASM = 1 # Inside inline assembly block _END_ASM = 2 # Last line of inline assembly block _BLOCK_ASM = 3 # The whole block is an inline assembly block # Match start of assembly blocks _MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)' r'(?:\s+(volatile|__volatile__))?' r'\s*[{(]') _regexp_compile_cache = {} # Finds occurrences of NOLINT[_NEXT_LINE] or NOLINT[_NEXT_LINE](...). _RE_SUPPRESSION = re.compile(r'\bNOLINT(_NEXT_LINE)?\b(\([^)]*\))?') # {str, set(int)}: a map from error categories to sets of linenumbers # on which those errors are expected and should be suppressed. _error_suppressions = {} # Finds Copyright. _RE_COPYRIGHT = re.compile(r'Copyright') # The root directory used for deriving header guard CPP variable. # This is set by --root flag. _root = None # The allowed line length of files. # This is set by --linelength flag. _line_length = 80 # The allowed extensions for file names # This is set by --extensions flag. _valid_extensions = set(['cc', 'h', 'cpp', 'hpp', 'cu', 'cuh']) def ParseNolintSuppressions(filename, raw_line, linenum, error): """Updates the global list of error-suppressions. 
Parses any NOLINT comments on the current line, updating the global error_suppressions store. Reports an error if the NOLINT comment was malformed. Args: filename: str, the name of the input file. raw_line: str, the line of input text, with comments. linenum: int, the number of the current line. error: function, an error handler. """ # FIXME(adonovan): "NOLINT(" is misparsed as NOLINT(*). matched = _RE_SUPPRESSION.search(raw_line) if matched: if matched.group(1) == '_NEXT_LINE': linenum += 1 category = matched.group(2) if category in (None, '(*)'): # => "suppress all" _error_suppressions.setdefault(None, set()).add(linenum) else: if category.startswith('(') and category.endswith(')'): category = category[1:-1] if category in _ERROR_CATEGORIES: _error_suppressions.setdefault(category, set()).add(linenum) else: error(filename, linenum, 'readability/nolint', 5, 'Unknown NOLINT error category: %s' % category) def ResetNolintSuppressions(): "Resets the set of NOLINT suppressions to empty." _error_suppressions.clear() def IsErrorSuppressedByNolint(category, linenum): """Returns true if the specified error category is suppressed on this line. Consults the global error_suppressions map populated by ParseNolintSuppressions/ResetNolintSuppressions. Args: category: str, the category of the error. linenum: int, the current line number. Returns: bool, True iff the error should be suppressed due to a NOLINT comment. """ return (linenum in _error_suppressions.get(category, set()) or linenum in _error_suppressions.get(None, set())) def Match(pattern, s): """Matches the string with the pattern, caching the compiled regexp.""" # The regexp compilation caching is inlined in both Match and Search for # performance reasons; factoring it out into a separate function turns out # to be noticeably expensive. if pattern not in _regexp_compile_cache: _regexp_compile_cache[pattern] = sre_compile.compile(pattern) return _regexp_compile_cache[pattern].match(s) def ReplaceAll(pattern, rep, s): """Replaces instances of pattern in a string with a replacement. The compiled regex is kept in a cache shared by Match and Search. Args: pattern: regex pattern rep: replacement text s: search string Returns: string with replacements made (or original string if no replacements) """ if pattern not in _regexp_compile_cache: _regexp_compile_cache[pattern] = sre_compile.compile(pattern) return _regexp_compile_cache[pattern].sub(rep, s) def Search(pattern, s): """Searches the string for the pattern, caching the compiled regexp.""" if pattern not in _regexp_compile_cache: _regexp_compile_cache[pattern] = sre_compile.compile(pattern) return _regexp_compile_cache[pattern].search(s) class _IncludeState(dict): """Tracks line numbers for includes, and the order in which includes appear. As a dict, an _IncludeState object serves as a mapping between include filename and line number on which that file was included. Call CheckNextIncludeOrder() once for each header in the file, passing in the type constants defined above. Calls in an illegal order will raise an _IncludeError with an appropriate error message. """ # self._section will move monotonically through this set. If it ever # needs to move backwards, CheckNextIncludeOrder will raise an error. 
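  #
  # For example (illustrative), the section order encoded below expects the
  # includes of foo.cc to appear as:
  #   #include "foo.h"        (my header)
  #   #include <sys/types.h>  (C system header)
  #   #include <vector>       (C++ system header)
  #   #include "bar.h"        (other header)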
_INITIAL_SECTION = 0 _MY_H_SECTION = 1 _C_SECTION = 2 _CPP_SECTION = 3 _OTHER_H_SECTION = 4 _TYPE_NAMES = { _C_SYS_HEADER: 'C system header', _CPP_SYS_HEADER: 'C++ system header', _LIKELY_MY_HEADER: 'header this file implements', _POSSIBLE_MY_HEADER: 'header this file may implement', _OTHER_HEADER: 'other header', } _SECTION_NAMES = { _INITIAL_SECTION: "... nothing. (This can't be an error.)", _MY_H_SECTION: 'a header this file implements', _C_SECTION: 'C system header', _CPP_SECTION: 'C++ system header', _OTHER_H_SECTION: 'other header', } def __init__(self): dict.__init__(self) self.ResetSection() def ResetSection(self): # The name of the current section. self._section = self._INITIAL_SECTION # The path of last found header. self._last_header = '' def SetLastHeader(self, header_path): self._last_header = header_path def CanonicalizeAlphabeticalOrder(self, header_path): """Returns a path canonicalized for alphabetical comparison. - replaces "-" with "_" so they both cmp the same. - removes '-inl' since we don't require them to be after the main header. - lowercase everything, just in case. Args: header_path: Path to be canonicalized. Returns: Canonicalized path. """ return header_path.replace('-inl.h', '.h').replace('-', '_').lower() def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path): """Check if a header is in alphabetical order with the previous header. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. header_path: Canonicalized header to be checked. Returns: Returns true if the header is in alphabetical order. """ # If previous section is different from current section, _last_header will # be reset to empty string, so it's always less than current header. # # If previous line was a blank line, assume that the headers are # intentionally sorted the way they are. if (self._last_header > header_path and not Match(r'^\s*$', clean_lines.elided[linenum - 1])): return False return True def CheckNextIncludeOrder(self, header_type): """Returns a non-empty error message if the next header is out of order. This function also updates the internal state to be ready to check the next include. Args: header_type: One of the _XXX_HEADER constants defined above. Returns: The empty string if the header is in the right order, or an error message describing what's wrong. """ error_message = ('Found %s after %s' % (self._TYPE_NAMES[header_type], self._SECTION_NAMES[self._section])) last_section = self._section if header_type == _C_SYS_HEADER: if self._section <= self._C_SECTION: self._section = self._C_SECTION else: self._last_header = '' return error_message elif header_type == _CPP_SYS_HEADER: if self._section <= self._CPP_SECTION: self._section = self._CPP_SECTION else: self._last_header = '' return error_message elif header_type == _LIKELY_MY_HEADER: if self._section <= self._MY_H_SECTION: self._section = self._MY_H_SECTION else: self._section = self._OTHER_H_SECTION elif header_type == _POSSIBLE_MY_HEADER: if self._section <= self._MY_H_SECTION: self._section = self._MY_H_SECTION else: # This will always be the fallback because we're not sure # enough that the header is associated with this file. self._section = self._OTHER_H_SECTION else: assert header_type == _OTHER_HEADER self._section = self._OTHER_H_SECTION if last_section != self._section: self._last_header = '' return '' class _CppLintState(object): """Maintains module-wide state..""" def __init__(self): self.verbose_level = 1 # global setting. 
self.error_count = 0 # global count of reported errors # filters to apply when emitting error messages self.filters = _DEFAULT_FILTERS[:] self.counting = 'total' # In what way are we counting errors? self.errors_by_category = {} # string to int dict storing error counts # output format: # "emacs" - format that emacs can parse (default) # "vs7" - format that Microsoft Visual Studio 7 can parse self.output_format = 'emacs' def SetOutputFormat(self, output_format): """Sets the output format for errors.""" self.output_format = output_format def SetVerboseLevel(self, level): """Sets the module's verbosity, and returns the previous setting.""" last_verbose_level = self.verbose_level self.verbose_level = level return last_verbose_level def SetCountingStyle(self, counting_style): """Sets the module's counting options.""" self.counting = counting_style def SetFilters(self, filters): """Sets the error-message filters. These filters are applied when deciding whether to emit a given error message. Args: filters: A string of comma-separated filters (eg "+whitespace/indent"). Each filter should start with + or -; else we die. Raises: ValueError: The comma-separated filters did not all start with '+' or '-'. E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter" """ # Default filters always have less priority than the flag ones. self.filters = _DEFAULT_FILTERS[:] for filt in filters.split(','): clean_filt = filt.strip() if clean_filt: self.filters.append(clean_filt) for filt in self.filters: if not (filt.startswith('+') or filt.startswith('-')): raise ValueError('Every filter in --filters must start with + or -' ' (%s does not)' % filt) def ResetErrorCounts(self): """Sets the module's error statistic back to zero.""" self.error_count = 0 self.errors_by_category = {} def IncrementErrorCount(self, category): """Bumps the module's error statistic.""" self.error_count += 1 if self.counting in ('toplevel', 'detailed'): if self.counting != 'detailed': category = category.split('/')[0] if category not in self.errors_by_category: self.errors_by_category[category] = 0 self.errors_by_category[category] += 1 def PrintErrorCounts(self): """Print a summary of errors by category, and the total.""" for category, count in self.errors_by_category.iteritems(): sys.stderr.write('Category \'%s\' errors found: %d\n' % (category, count)) sys.stderr.write('Total errors found: %d\n' % self.error_count) _cpplint_state = _CppLintState() def _OutputFormat(): """Gets the module's output format.""" return _cpplint_state.output_format def _SetOutputFormat(output_format): """Sets the module's output format.""" _cpplint_state.SetOutputFormat(output_format) def _VerboseLevel(): """Returns the module's verbosity setting.""" return _cpplint_state.verbose_level def _SetVerboseLevel(level): """Sets the module's verbosity, and returns the previous setting.""" return _cpplint_state.SetVerboseLevel(level) def _SetCountingStyle(level): """Sets the module's counting options.""" _cpplint_state.SetCountingStyle(level) def _Filters(): """Returns the module's list of output filters, as a list.""" return _cpplint_state.filters def _SetFilters(filters): """Sets the module's error-message filters. These filters are applied when deciding whether to emit a given error message. Args: filters: A string of comma-separated filters (eg "whitespace/indent"). Each filter should start with + or -; else we die. 
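
  Example (illustrative):
    _SetFilters('-whitespace,+whitespace/braces') suppresses all whitespace
    categories except whitespace/braces.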
""" _cpplint_state.SetFilters(filters) class _FunctionState(object): """Tracks current function name and the number of lines in its body.""" _NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc. _TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER. def __init__(self): self.in_a_function = False self.lines_in_function = 0 self.current_function = '' def Begin(self, function_name): """Start analyzing function body. Args: function_name: The name of the function being tracked. """ self.in_a_function = True self.lines_in_function = 0 self.current_function = function_name def Count(self): """Count line in current function body.""" if self.in_a_function: self.lines_in_function += 1 def Check(self, error, filename, linenum): """Report if too many lines in function body. Args: error: The function to call with any errors found. filename: The name of the current file. linenum: The number of the line to check. """ if Match(r'T(EST|est)', self.current_function): base_trigger = self._TEST_TRIGGER else: base_trigger = self._NORMAL_TRIGGER trigger = base_trigger * 2**_VerboseLevel() if self.lines_in_function > trigger: error_level = int(math.log(self.lines_in_function / base_trigger, 2)) # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ... if error_level > 5: error_level = 5 error(filename, linenum, 'readability/fn_size', error_level, 'Small and focused functions are preferred:' ' %s has %d non-comment lines' ' (error triggered by exceeding %d lines).' % ( self.current_function, self.lines_in_function, trigger)) def End(self): """Stop analyzing function body.""" self.in_a_function = False class _IncludeError(Exception): """Indicates a problem with the include order in a file.""" pass class FileInfo: """Provides utility functions for filenames. FileInfo provides easy access to the components of a file's path relative to the project root. """ def __init__(self, filename): self._filename = filename def FullName(self): """Make Windows paths like Unix.""" return os.path.abspath(self._filename).replace('\\', '/') def RepositoryName(self): """FullName after removing the local path to the repository. If we have a real absolute path name here we can try to do something smart: detecting the root of the checkout and truncating /path/to/checkout from the name so that we get header guards that don't include things like "C:\Documents and Settings\..." or "/home/username/..." in them and thus people on different computers who have checked the source out to different locations won't see bogus errors. """ fullname = self.FullName() if os.path.exists(fullname): project_dir = os.path.dirname(fullname) if os.path.exists(os.path.join(project_dir, ".svn")): # If there's a .svn file in the current directory, we recursively look # up the directory tree for the top of the SVN checkout root_dir = project_dir one_up_dir = os.path.dirname(root_dir) while os.path.exists(os.path.join(one_up_dir, ".svn")): root_dir = os.path.dirname(root_dir) one_up_dir = os.path.dirname(one_up_dir) prefix = os.path.commonprefix([root_dir, project_dir]) return fullname[len(prefix) + 1:] # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by # searching up from the current path. 
root_dir = os.path.dirname(fullname) while (root_dir != os.path.dirname(root_dir) and not os.path.exists(os.path.join(root_dir, ".git")) and not os.path.exists(os.path.join(root_dir, ".hg")) and not os.path.exists(os.path.join(root_dir, ".svn"))): root_dir = os.path.dirname(root_dir) if (os.path.exists(os.path.join(root_dir, ".git")) or os.path.exists(os.path.join(root_dir, ".hg")) or os.path.exists(os.path.join(root_dir, ".svn"))): prefix = os.path.commonprefix([root_dir, project_dir]) return fullname[len(prefix) + 1:] # Don't know what to do; header guard warnings may be wrong... return fullname def Split(self): """Splits the file into the directory, basename, and extension. For 'chrome/browser/browser.cc', Split() would return ('chrome/browser', 'browser', '.cc') Returns: A tuple of (directory, basename, extension). """ googlename = self.RepositoryName() project, rest = os.path.split(googlename) return (project,) + os.path.splitext(rest) def BaseName(self): """File base name - text after the final slash, before the final period.""" return self.Split()[1] def Extension(self): """File extension - text following the final period.""" return self.Split()[2] def NoExtension(self): """File has no source file extension.""" return '/'.join(self.Split()[0:2]) def IsSource(self): """File has a source file extension.""" return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx') def _ShouldPrintError(category, confidence, linenum): """If confidence >= verbose, category passes filter and is not suppressed.""" # There are three ways we might decide not to print an error message: # a "NOLINT(category)" comment appears in the source, # the verbosity level isn't high enough, or the filters filter it out. if IsErrorSuppressedByNolint(category, linenum): return False if confidence < _cpplint_state.verbose_level: return False is_filtered = False for one_filter in _Filters(): if one_filter.startswith('-'): if category.startswith(one_filter[1:]): is_filtered = True elif one_filter.startswith('+'): if category.startswith(one_filter[1:]): is_filtered = False else: assert False # should have been checked for in SetFilter. if is_filtered: return False return True def Error(filename, linenum, category, confidence, message): """Logs the fact we've found a lint error. We log where the error was found, and also our confidence in the error, that is, how certain we are this is a legitimate style regression, and not a misidentification or a use that's sometimes justified. False positives can be suppressed by the use of "cpplint(category)" comments on the offending line. These are parsed into _error_suppressions. Args: filename: The name of the file containing the error. linenum: The number of the line containing the error. category: A string used to describe the "category" this bug falls under: "whitespace", say, or "runtime". Categories may have a hierarchy separated by slashes: "whitespace/indent". confidence: A number from 1-5 representing a confidence score for the error, with 5 meaning that we are certain of the problem, and 1 meaning that it could be a legitimate construct. message: The error message. 
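
  Example (illustrative, default emacs-style output written to stderr):
    foo.cc:42: Missing spaces around = [whitespace/operators] [4]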
""" if _ShouldPrintError(category, confidence, linenum): _cpplint_state.IncrementErrorCount(category) if _cpplint_state.output_format == 'vs7': sys.stderr.write('%s(%s): %s [%s] [%d]\n' % ( filename, linenum, message, category, confidence)) elif _cpplint_state.output_format == 'eclipse': sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % ( filename, linenum, message, category, confidence)) else: sys.stderr.write('%s:%s: %s [%s] [%d]\n' % ( filename, linenum, message, category, confidence)) # Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard. _RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile( r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)') # Matches strings. Escape codes should already be removed by ESCAPES. _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES = re.compile(r'"[^"]*"') # Matches characters. Escape codes should already be removed by ESCAPES. _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES = re.compile(r"'.'") # Matches multi-line C++ comments. # This RE is a little bit more complicated than one might expect, because we # have to take care of space removals tools so we can handle comments inside # statements better. # The current rule is: We only clear spaces from both sides when we're at the # end of the line. Otherwise, we try to remove spaces from the right side, # if this doesn't work we try on left side but only if there's a non-character # on the right. _RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile( r"""(\s*/\*.*\*/\s*$| /\*.*\*/\s+| \s+/\*.*\*/(?=\W)| /\*.*\*/)""", re.VERBOSE) def IsCppString(line): """Does line terminate so, that the next symbol is in string constant. This function does not consider single-line nor multi-line comments. Args: line: is a partial line of code starting from the 0..n. Returns: True, if next character appended to 'line' is inside a string constant. """ line = line.replace(r'\\', 'XX') # after this, \\" does not match to \" return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1 def CleanseRawStrings(raw_lines): """Removes C++11 raw strings from lines. Before: static const char kData[] = R"( multi-line string )"; After: static const char kData[] = "" (replaced by blank line) ""; Args: raw_lines: list of raw lines. Returns: list of lines with C++11 raw strings replaced by empty strings. """ delimiter = None lines_without_raw_strings = [] for line in raw_lines: if delimiter: # Inside a raw string, look for the end end = line.find(delimiter) if end >= 0: # Found the end of the string, match leading space for this # line and resume copying the original lines, and also insert # a "" on the last line. leading_space = Match(r'^(\s*)\S', line) line = leading_space.group(1) + '""' + line[end + len(delimiter):] delimiter = None else: # Haven't found the end yet, append a blank line. line = '' else: # Look for beginning of a raw string. # See 2.14.15 [lex.string] for syntax. matched = Match(r'^(.*)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line) if matched: delimiter = ')' + matched.group(2) + '"' end = matched.group(3).find(delimiter) if end >= 0: # Raw string ended on same line line = (matched.group(1) + '""' + matched.group(3)[end + len(delimiter):]) delimiter = None else: # Start of a multi-line raw string line = matched.group(1) + '""' lines_without_raw_strings.append(line) # TODO(unknown): if delimiter is not None here, we might want to # emit a warning for unterminated string. 
return lines_without_raw_strings def FindNextMultiLineCommentStart(lines, lineix): """Find the beginning marker for a multiline comment.""" while lineix < len(lines): if lines[lineix].strip().startswith('/*'): # Only return this marker if the comment goes beyond this line if lines[lineix].strip().find('*/', 2) < 0: return lineix lineix += 1 return len(lines) def FindNextMultiLineCommentEnd(lines, lineix): """We are inside a comment, find the end marker.""" while lineix < len(lines): if lines[lineix].strip().endswith('*/'): return lineix lineix += 1 return len(lines) def RemoveMultiLineCommentsFromRange(lines, begin, end): """Clears a range of lines for multi-line comments.""" # Having // dummy comments makes the lines non-empty, so we will not get # unnecessary blank line warnings later in the code. for i in range(begin, end): lines[i] = '// dummy' def RemoveMultiLineComments(filename, lines, error): """Removes multiline (c-style) comments from lines.""" lineix = 0 while lineix < len(lines): lineix_begin = FindNextMultiLineCommentStart(lines, lineix) if lineix_begin >= len(lines): return lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin) if lineix_end >= len(lines): error(filename, lineix_begin + 1, 'readability/multiline_comment', 5, 'Could not find end of multi-line comment') return RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1) lineix = lineix_end + 1 def CleanseComments(line): """Removes //-comments and single-line C-style /* */ comments. Args: line: A line of C++ source. Returns: The line with single-line comments removed. """ commentpos = line.find('//') if commentpos != -1 and not IsCppString(line[:commentpos]): line = line[:commentpos].rstrip() # get rid of /* ... */ return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line) class CleansedLines(object): """Holds 3 copies of all lines with different preprocessing applied to them. 1) elided member contains lines without strings and comments, 2) lines member contains lines without comments, and 3) raw_lines member contains all the lines without processing. All these three members are of <type 'list'>, and of the same length. """ def __init__(self, lines): self.elided = [] self.lines = [] self.raw_lines = lines self.num_lines = len(lines) self.lines_without_raw_strings = CleanseRawStrings(lines) for linenum in range(len(self.lines_without_raw_strings)): self.lines.append(CleanseComments( self.lines_without_raw_strings[linenum])) elided = self._CollapseStrings(self.lines_without_raw_strings[linenum]) self.elided.append(CleanseComments(elided)) def NumLines(self): """Returns the number of lines represented.""" return self.num_lines @staticmethod def _CollapseStrings(elided): """Collapses strings and chars on a line to simple "" or '' blocks. We nix strings first so we're not fooled by text like '"http://"' Args: elided: The line being processed. Returns: The line with collapsed strings. """ if not _RE_PATTERN_INCLUDE.match(elided): # Remove escaped characters first to make quote/single quote collapsing # basic. Things that look like escaped characters shouldn't occur # outside of strings and chars. elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided) elided = _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES.sub("''", elided) elided = _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES.sub('""', elided) return elided def FindEndOfExpressionInLine(line, startpos, depth, startchar, endchar): """Find the position just after the matching endchar. Args: line: a CleansedLines line. startpos: start searching at this position. 
depth: nesting level at startpos. startchar: expression opening character. endchar: expression closing character. Returns: On finding matching endchar: (index just after matching endchar, 0) Otherwise: (-1, new depth at end of this line) """ for i in xrange(startpos, len(line)): if line[i] == startchar: depth += 1 elif line[i] == endchar: depth -= 1 if depth == 0: return (i + 1, 0) return (-1, depth) def CloseExpression(clean_lines, linenum, pos): """If input points to ( or { or [ or <, finds the position that closes it. If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the linenum/pos that correspond to the closing of the expression. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. pos: A position on the line. Returns: A tuple (line, linenum, pos) pointer *past* the closing brace, or (line, len(lines), -1) if we never find a close. Note we ignore strings and comments when matching; and the line we return is the 'cleansed' line at linenum. """ line = clean_lines.elided[linenum] startchar = line[pos] if startchar not in '({[<': return (line, clean_lines.NumLines(), -1) if startchar == '(': endchar = ')' if startchar == '[': endchar = ']' if startchar == '{': endchar = '}' if startchar == '<': endchar = '>' # Check first line (end_pos, num_open) = FindEndOfExpressionInLine( line, pos, 0, startchar, endchar) if end_pos > -1: return (line, linenum, end_pos) # Continue scanning forward while linenum < clean_lines.NumLines() - 1: linenum += 1 line = clean_lines.elided[linenum] (end_pos, num_open) = FindEndOfExpressionInLine( line, 0, num_open, startchar, endchar) if end_pos > -1: return (line, linenum, end_pos) # Did not find endchar before end of file, give up return (line, clean_lines.NumLines(), -1) def FindStartOfExpressionInLine(line, endpos, depth, startchar, endchar): """Find position at the matching startchar. This is almost the reverse of FindEndOfExpressionInLine, but note that the input position and returned position differs by 1. Args: line: a CleansedLines line. endpos: start searching at this position. depth: nesting level at endpos. startchar: expression opening character. endchar: expression closing character. Returns: On finding matching startchar: (index at matching startchar, 0) Otherwise: (-1, new depth at beginning of this line) """ for i in xrange(endpos, -1, -1): if line[i] == endchar: depth += 1 elif line[i] == startchar: depth -= 1 if depth == 0: return (i, 0) return (-1, depth) def ReverseCloseExpression(clean_lines, linenum, pos): """If input points to ) or } or ] or >, finds the position that opens it. If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the linenum/pos that correspond to the opening of the expression. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. pos: A position on the line. Returns: A tuple (line, linenum, pos) pointer *at* the opening brace, or (line, 0, -1) if we never find the matching opening brace. Note we ignore strings and comments when matching; and the line we return is the 'cleansed' line at linenum. 
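
  Example (illustrative): if the cleansed line is "foo(bar[i])" and pos
  points at the final ')', the returned pos is the index of the matching
  '(' on the same line.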
""" line = clean_lines.elided[linenum] endchar = line[pos] if endchar not in ')}]>': return (line, 0, -1) if endchar == ')': startchar = '(' if endchar == ']': startchar = '[' if endchar == '}': startchar = '{' if endchar == '>': startchar = '<' # Check last line (start_pos, num_open) = FindStartOfExpressionInLine( line, pos, 0, startchar, endchar) if start_pos > -1: return (line, linenum, start_pos) # Continue scanning backward while linenum > 0: linenum -= 1 line = clean_lines.elided[linenum] (start_pos, num_open) = FindStartOfExpressionInLine( line, len(line) - 1, num_open, startchar, endchar) if start_pos > -1: return (line, linenum, start_pos) # Did not find startchar before beginning of file, give up return (line, 0, -1) def CheckForCopyright(filename, lines, error): """Logs an error if a Copyright message appears at the top of the file.""" # We'll check up to line 10. Don't forget there's a # dummy line at the front. for line in xrange(1, min(len(lines), 11)): if _RE_COPYRIGHT.search(lines[line], re.I): error(filename, 0, 'legal/copyright', 5, 'Copyright message found. ' 'You should not include a copyright line.') def GetHeaderGuardCPPVariable(filename): """Returns the CPP variable that should be used as a header guard. Args: filename: The name of a C++ header file. Returns: The CPP variable that should be used as a header guard in the named file. """ # Restores original filename in case that cpplint is invoked from Emacs's # flymake. filename = re.sub(r'_flymake\.h$', '.h', filename) filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename) fileinfo = FileInfo(filename) file_path_from_root = fileinfo.RepositoryName() if _root: file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root) return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_' def CheckForHeaderGuard(filename, lines, error): """Checks that the file contains a header guard. Logs an error if no #ifndef header guard is present. For other headers, checks that the full pathname is used. Args: filename: The name of the C++ header file. lines: An array of strings, each representing a line of the file. error: The function to call with any errors found. """ cppvar = GetHeaderGuardCPPVariable(filename) ifndef = None ifndef_linenum = 0 define = None endif = None endif_linenum = 0 for linenum, line in enumerate(lines): linesplit = line.split() if len(linesplit) >= 2: # find the first occurrence of #ifndef and #define, save arg if not ifndef and linesplit[0] == '#ifndef': # set ifndef to the header guard presented on the #ifndef line. ifndef = linesplit[1] ifndef_linenum = linenum if not define and linesplit[0] == '#define': define = linesplit[1] # find the last occurrence of #endif, save entire line if line.startswith('#endif'): endif = line endif_linenum = linenum if not ifndef: error(filename, 0, 'build/header_guard', 5, 'No #ifndef header guard found, suggested CPP variable is: %s' % cppvar) return if not define: error(filename, 0, 'build/header_guard', 5, 'No #define header guard found, suggested CPP variable is: %s' % cppvar) return # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__ # for backward compatibility. 
if ifndef != cppvar: error_level = 0 if ifndef != cppvar + '_': error_level = 5 ParseNolintSuppressions(filename, lines[ifndef_linenum], ifndef_linenum, error) error(filename, ifndef_linenum, 'build/header_guard', error_level, '#ifndef header guard has wrong style, please use: %s' % cppvar) if define != ifndef: error(filename, 0, 'build/header_guard', 5, '#ifndef and #define don\'t match, suggested CPP variable is: %s' % cppvar) return if endif != ('#endif // %s' % cppvar): error_level = 0 if endif != ('#endif // %s' % (cppvar + '_')): error_level = 5 ParseNolintSuppressions(filename, lines[endif_linenum], endif_linenum, error) error(filename, endif_linenum, 'build/header_guard', error_level, '#endif line should be "#endif // %s"' % cppvar) def CheckForBadCharacters(filename, lines, error): """Logs an error for each line containing bad characters. Two kinds of bad characters: 1. Unicode replacement characters: These indicate that either the file contained invalid UTF-8 (likely) or Unicode replacement characters (which it shouldn't). Note that it's possible for this to throw off line numbering if the invalid UTF-8 occurred adjacent to a newline. 2. NUL bytes. These are problematic for some tools. Args: filename: The name of the current file. lines: An array of strings, each representing a line of the file. error: The function to call with any errors found. """ for linenum, line in enumerate(lines): if u'\ufffd' in line: error(filename, linenum, 'readability/utf8', 5, 'Line contains invalid UTF-8 (or Unicode replacement character).') if '\0' in line: error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.') def CheckForNewlineAtEOF(filename, lines, error): """Logs an error if there is no newline char at the end of the file. Args: filename: The name of the current file. lines: An array of strings, each representing a line of the file. error: The function to call with any errors found. """ # The array lines() was created by adding two newlines to the # original file (go figure), then splitting on \n. # To verify that the file ends in \n, we just have to make sure the # last-but-two element of lines() exists and is empty. if len(lines) < 3 or lines[-2]: error(filename, len(lines) - 2, 'whitespace/ending_newline', 5, 'Could not find a newline character at the end of the file.') def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error): """Logs an error if we see /* ... */ or "..." that extend past one line. /* ... */ comments are legit inside macros, for one line. Otherwise, we prefer // comments, so it's ok to warn about the other. Likewise, it's ok for strings to extend across multiple lines, as long as a line continuation character (backslash) terminates each line. Although not currently prohibited by the C++ style guide, it's ugly and unnecessary. We don't do well with either in this lint program, so we warn about both. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Remove all \\ (escaped backslashes) from the line. They are OK, and the # second (escaped) slash may trigger later \" detection erroneously. line = line.replace('\\\\', '') if line.count('/*') > line.count('*/'): error(filename, linenum, 'readability/multiline_comment', 5, 'Complex multi-line /*...*/-style comment found. ' 'Lint may give bogus warnings. 
' 'Consider replacing these with //-style comments, ' 'with #if 0...#endif, ' 'or with more clearly structured multi-line comments.') if (line.count('"') - line.count('\\"')) % 2: error(filename, linenum, 'readability/multiline_string', 5, 'Multi-line string ("...") found. This lint script doesn\'t ' 'do well with such strings, and may give bogus warnings. ' 'Use C++11 raw strings or concatenation instead.') caffe_alt_function_list = ( ('memset', ['caffe_set', 'caffe_memset']), ('cudaMemset', ['caffe_gpu_set', 'caffe_gpu_memset']), ('memcpy', ['caffe_copy', 'caffe_memcpy']), ('cudaMemcpy', ['caffe_copy', 'caffe_gpu_memcpy']), ) def CheckCaffeAlternatives(filename, clean_lines, linenum, error): """Checks for C(++) functions for which a Caffe substitute should be used. For certain native C functions (memset, memcpy), there is a Caffe alternative which should be used instead. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] for function, alts in caffe_alt_function_list: ix = line.find(function + '(') if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and line[ix - 1] not in ('_', '.', '>'))): disp_alts = ['%s(...)' % alt for alt in alts] error(filename, linenum, 'caffe/alt_fn', 2, 'Use Caffe function %s instead of %s(...).' % (' or '.join(disp_alts), function)) def CheckCaffeDataLayerSetUp(filename, clean_lines, linenum, error): """Except the base classes, Caffe DataLayer should define DataLayerSetUp instead of LayerSetUp. The base DataLayers define common SetUp steps, the subclasses should not override them. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] ix = line.find('DataLayer<Dtype>::LayerSetUp') if ix >= 0 and ( line.find('void DataLayer<Dtype>::LayerSetUp') != -1 or line.find('void ImageDataLayer<Dtype>::LayerSetUp') != -1 or line.find('void MemoryDataLayer<Dtype>::LayerSetUp') != -1 or line.find('void WindowDataLayer<Dtype>::LayerSetUp') != -1): error(filename, linenum, 'caffe/data_layer_setup', 2, 'Except the base classes, Caffe DataLayer should define' + ' DataLayerSetUp instead of LayerSetUp. The base DataLayers' + ' define common SetUp steps, the subclasses should' + ' not override them.') ix = line.find('DataLayer<Dtype>::DataLayerSetUp') if ix >= 0 and ( line.find('void Base') == -1 and line.find('void DataLayer<Dtype>::DataLayerSetUp') == -1 and line.find('void ImageDataLayer<Dtype>::DataLayerSetUp') == -1 and line.find('void MemoryDataLayer<Dtype>::DataLayerSetUp') == -1 and line.find('void WindowDataLayer<Dtype>::DataLayerSetUp') == -1): error(filename, linenum, 'caffe/data_layer_setup', 2, 'Except the base classes, Caffe DataLayer should define' + ' DataLayerSetUp instead of LayerSetUp. The base DataLayers' + ' define common SetUp steps, the subclasses should' + ' not override them.') c_random_function_list = ( 'rand(', 'rand_r(', 'random(', ) def CheckCaffeRandom(filename, clean_lines, linenum, error): """Checks for calls to C random functions (rand, rand_r, random, ...). 
Caffe code should (almost) always use the caffe_rng_* functions rather than these, as the internal state of these C functions is independent of the native Caffe RNG system which should produce deterministic results for a fixed Caffe seed set using Caffe::set_random_seed(...). Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] for function in c_random_function_list: ix = line.find(function) # Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and line[ix - 1] not in ('_', '.', '>'))): error(filename, linenum, 'caffe/random_fn', 2, 'Use caffe_rng_rand() (or other caffe_rng_* function) instead of ' + function + ') to ensure results are deterministic for a fixed Caffe seed.') threading_list = ( ('asctime(', 'asctime_r('), ('ctime(', 'ctime_r('), ('getgrgid(', 'getgrgid_r('), ('getgrnam(', 'getgrnam_r('), ('getlogin(', 'getlogin_r('), ('getpwnam(', 'getpwnam_r('), ('getpwuid(', 'getpwuid_r('), ('gmtime(', 'gmtime_r('), ('localtime(', 'localtime_r('), ('strtok(', 'strtok_r('), ('ttyname(', 'ttyname_r('), ) def CheckPosixThreading(filename, clean_lines, linenum, error): """Checks for calls to thread-unsafe functions. Much code has been originally written without consideration of multi-threading. Also, engineers are relying on their old experience; they have learned posix before threading extensions were added. These tests guide the engineers to use thread-safe functions (when using posix directly). Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] for single_thread_function, multithread_safe_function in threading_list: ix = line.find(single_thread_function) # Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and line[ix - 1] not in ('_', '.', '>'))): error(filename, linenum, 'runtime/threadsafe_fn', 2, 'Consider using ' + multithread_safe_function + '...) instead of ' + single_thread_function + '...) for improved thread safety.') def CheckVlogArguments(filename, clean_lines, linenum, error): """Checks that VLOG() is only used for defining a logging level. For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and VLOG(FATAL) are not. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line): error(filename, linenum, 'runtime/vlog', 5, 'VLOG() should be used with numeric verbosity level. ' 'Use LOG() if you want symbolic severity levels.') # Matches invalid increment: *count++, which moves pointer instead of # incrementing a value. _RE_PATTERN_INVALID_INCREMENT = re.compile( r'^\s*\*\w+(\+\+|--);') def CheckInvalidIncrement(filename, clean_lines, linenum, error): """Checks for invalid increment *count++. 
For example following function: void increment_counter(int* count) { *count++; } is invalid, because it effectively does count++, moving pointer, and should be replaced with ++*count, (*count)++ or *count += 1. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] if _RE_PATTERN_INVALID_INCREMENT.match(line): error(filename, linenum, 'runtime/invalid_increment', 5, 'Changing pointer instead of value (or unused value of operator*).') class _BlockInfo(object): """Stores information about a generic block of code.""" def __init__(self, seen_open_brace): self.seen_open_brace = seen_open_brace self.open_parentheses = 0 self.inline_asm = _NO_ASM def CheckBegin(self, filename, clean_lines, linenum, error): """Run checks that applies to text up to the opening brace. This is mostly for checking the text after the class identifier and the "{", usually where the base class is specified. For other blocks, there isn't much to check, so we always pass. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ pass def CheckEnd(self, filename, clean_lines, linenum, error): """Run checks that applies to text after the closing brace. This is mostly used for checking end of namespace comments. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ pass class _ClassInfo(_BlockInfo): """Stores information about a class.""" def __init__(self, name, class_or_struct, clean_lines, linenum): _BlockInfo.__init__(self, False) self.name = name self.starting_linenum = linenum self.is_derived = False if class_or_struct == 'struct': self.access = 'public' self.is_struct = True else: self.access = 'private' self.is_struct = False # Remember initial indentation level for this class. Using raw_lines here # instead of elided to account for leading comments. initial_indent = Match(r'^( *)\S', clean_lines.raw_lines[linenum]) if initial_indent: self.class_indent = len(initial_indent.group(1)) else: self.class_indent = 0 # Try to find the end of the class. This will be confused by things like: # class A { # } *x = { ... # # But it's still good enough for CheckSectionSpacing. self.last_line = 0 depth = 0 for i in range(linenum, clean_lines.NumLines()): line = clean_lines.elided[i] depth += line.count('{') - line.count('}') if not depth: self.last_line = i break def CheckBegin(self, filename, clean_lines, linenum, error): # Look for a bare ':' if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]): self.is_derived = True def CheckEnd(self, filename, clean_lines, linenum, error): # Check that closing brace is aligned with beginning of the class. # Only do this if the closing brace is indented by only whitespaces. # This means we will not check single-line class definitions. 
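    # (Illustrative: for a "class Foo {" whose keyword starts at column 0,
    # a closing brace written as "  }" has indent 2 != class_indent 0 and
    # triggers the whitespace/indent warning below.)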
indent = Match(r'^( *)\}', clean_lines.elided[linenum]) if indent and len(indent.group(1)) != self.class_indent: if self.is_struct: parent = 'struct ' + self.name else: parent = 'class ' + self.name error(filename, linenum, 'whitespace/indent', 3, 'Closing brace should be aligned with beginning of %s' % parent) class _NamespaceInfo(_BlockInfo): """Stores information about a namespace.""" def __init__(self, name, linenum): _BlockInfo.__init__(self, False) self.name = name or '' self.starting_linenum = linenum def CheckEnd(self, filename, clean_lines, linenum, error): """Check end of namespace comments.""" line = clean_lines.raw_lines[linenum] # Check how many lines is enclosed in this namespace. Don't issue # warning for missing namespace comments if there aren't enough # lines. However, do apply checks if there is already an end of # namespace comment and it's incorrect. # # TODO(unknown): We always want to check end of namespace comments # if a namespace is large, but sometimes we also want to apply the # check if a short namespace contained nontrivial things (something # other than forward declarations). There is currently no logic on # deciding what these nontrivial things are, so this check is # triggered by namespace size only, which works most of the time. if (linenum - self.starting_linenum < 10 and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)): return # Look for matching comment at end of namespace. # # Note that we accept C style "/* */" comments for terminating # namespaces, so that code that terminate namespaces inside # preprocessor macros can be cpplint clean. # # We also accept stuff like "// end of namespace <name>." with the # period at the end. # # Besides these, we don't accept anything else, otherwise we might # get false negatives when existing comment is a substring of the # expected namespace. if self.name: # Named namespace if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) + r'[\*/\.\\\s]*$'), line): error(filename, linenum, 'readability/namespace', 5, 'Namespace should be terminated with "// namespace %s"' % self.name) else: # Anonymous namespace if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line): error(filename, linenum, 'readability/namespace', 5, 'Namespace should be terminated with "// namespace"') class _PreprocessorInfo(object): """Stores checkpoints of nesting stacks when #if/#else is seen.""" def __init__(self, stack_before_if): # The entire nesting stack before #if self.stack_before_if = stack_before_if # The entire nesting stack up to #else self.stack_before_else = [] # Whether we have already seen #else or #elif self.seen_else = False class _NestingState(object): """Holds states related to parsing braces.""" def __init__(self): # Stack for tracking all braces. An object is pushed whenever we # see a "{", and popped when we see a "}". Only 3 types of # objects are possible: # - _ClassInfo: a class or struct. # - _NamespaceInfo: a namespace. # - _BlockInfo: some other type of block. self.stack = [] # Stack of _PreprocessorInfo objects. self.pp_stack = [] def SeenOpenBrace(self): """Check if we have seen the opening brace for the innermost block. Returns: True if we have seen the opening brace, False if the innermost block is still expecting an opening brace. """ return (not self.stack) or self.stack[-1].seen_open_brace def InNamespaceBody(self): """Check if we are currently one level inside a namespace body. Returns: True if top of the stack is a namespace block, False otherwise. 
""" return self.stack and isinstance(self.stack[-1], _NamespaceInfo) def UpdatePreprocessor(self, line): """Update preprocessor stack. We need to handle preprocessors due to classes like this: #ifdef SWIG struct ResultDetailsPageElementExtensionPoint { #else struct ResultDetailsPageElementExtensionPoint : public Extension { #endif We make the following assumptions (good enough for most files): - Preprocessor condition evaluates to true from #if up to first #else/#elif/#endif. - Preprocessor condition evaluates to false from #else/#elif up to #endif. We still perform lint checks on these lines, but these do not affect nesting stack. Args: line: current line to check. """ if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line): # Beginning of #if block, save the nesting stack here. The saved # stack will allow us to restore the parsing state in the #else case. self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack))) elif Match(r'^\s*#\s*(else|elif)\b', line): # Beginning of #else block if self.pp_stack: if not self.pp_stack[-1].seen_else: # This is the first #else or #elif block. Remember the # whole nesting stack up to this point. This is what we # keep after the #endif. self.pp_stack[-1].seen_else = True self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack) # Restore the stack to how it was before the #if self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if) else: # TODO(unknown): unexpected #else, issue warning? pass elif Match(r'^\s*#\s*endif\b', line): # End of #if or #else blocks. if self.pp_stack: # If we saw an #else, we will need to restore the nesting # stack to its former state before the #else, otherwise we # will just continue from where we left off. if self.pp_stack[-1].seen_else: # Here we can just use a shallow copy since we are the last # reference to it. self.stack = self.pp_stack[-1].stack_before_else # Drop the corresponding #if self.pp_stack.pop() else: # TODO(unknown): unexpected #endif, issue warning? pass def Update(self, filename, clean_lines, linenum, error): """Update nesting state with current line. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Update pp_stack first self.UpdatePreprocessor(line) # Count parentheses. This is to avoid adding struct arguments to # the nesting stack. if self.stack: inner_block = self.stack[-1] depth_change = line.count('(') - line.count(')') inner_block.open_parentheses += depth_change # Also check if we are starting or ending an inline assembly block. if inner_block.inline_asm in (_NO_ASM, _END_ASM): if (depth_change != 0 and inner_block.open_parentheses == 1 and _MATCH_ASM.match(line)): # Enter assembly block inner_block.inline_asm = _INSIDE_ASM else: # Not entering assembly block. If previous line was _END_ASM, # we will now shift to _NO_ASM state. inner_block.inline_asm = _NO_ASM elif (inner_block.inline_asm == _INSIDE_ASM and inner_block.open_parentheses == 0): # Exit assembly block inner_block.inline_asm = _END_ASM # Consume namespace declaration at the beginning of the line. Do # this in a loop so that we catch same line declarations like this: # namespace proto2 { namespace bridge { class MessageSet; } } while True: # Match start of namespace. The "\b\s*" below catches namespace # declarations even if it weren't followed by a whitespace, this # is so that we don't confuse our namespace checker. 
The # missing spaces will be flagged by CheckSpacing. namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line) if not namespace_decl_match: break new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum) self.stack.append(new_namespace) line = namespace_decl_match.group(2) if line.find('{') != -1: new_namespace.seen_open_brace = True line = line[line.find('{') + 1:] # Look for a class declaration in whatever is left of the line # after parsing namespaces. The regexp accounts for decorated classes # such as in: # class LOCKABLE API Object { # }; # # Templates with class arguments may confuse the parser, for example: # template <class T # class Comparator = less<T>, # class Vector = vector<T> > # class HeapQueue { # # Because this parser has no nesting state about templates, by the # time it saw "class Comparator", it may think that it's a new class. # Nested templates have a similar problem: # template < # typename ExportedType, # typename TupleType, # template <typename, typename> class ImplTemplate> # # To avoid these cases, we ignore classes that are followed by '=' or '>' class_decl_match = Match( r'\s*(template\s*<[\w\s<>,:]*>\s*)?' r'(class|struct)\s+([A-Z_]+\s+)*(\w+(?:::\w+)*)' r'(([^=>]|<[^<>]*>|<[^<>]*<[^<>]*>\s*>)*)$', line) if (class_decl_match and (not self.stack or self.stack[-1].open_parentheses == 0)): self.stack.append(_ClassInfo( class_decl_match.group(4), class_decl_match.group(2), clean_lines, linenum)) line = class_decl_match.group(5) # If we have not yet seen the opening brace for the innermost block, # run checks here. if not self.SeenOpenBrace(): self.stack[-1].CheckBegin(filename, clean_lines, linenum, error) # Update access control if we are inside a class/struct if self.stack and isinstance(self.stack[-1], _ClassInfo): classinfo = self.stack[-1] access_match = Match( r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?' r':(?:[^:]|$)', line) if access_match: classinfo.access = access_match.group(2) # Check that access keywords are indented +1 space. Skip this # check if the keywords are not preceded by whitespaces. indent = access_match.group(1) if (len(indent) != classinfo.class_indent + 1 and Match(r'^\s*$', indent)): if classinfo.is_struct: parent = 'struct ' + classinfo.name else: parent = 'class ' + classinfo.name slots = '' if access_match.group(3): slots = access_match.group(3) error(filename, linenum, 'whitespace/indent', 3, '%s%s: should be indented +1 space inside %s' % ( access_match.group(2), slots, parent)) # Consume braces or semicolons from what's left of the line while True: # Match first brace, semicolon, or closed parenthesis. matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line) if not matched: break token = matched.group(1) if token == '{': # If namespace or class hasn't seen a opening brace yet, mark # namespace/class head as complete. Push a new block onto the # stack otherwise. if not self.SeenOpenBrace(): self.stack[-1].seen_open_brace = True else: self.stack.append(_BlockInfo(True)) if _MATCH_ASM.match(line): self.stack[-1].inline_asm = _BLOCK_ASM elif token == ';' or token == ')': # If we haven't seen an opening brace yet, but we already saw # a semicolon, this is probably a forward declaration. Pop # the stack for these. # # Similarly, if we haven't seen an opening brace yet, but we # already saw a closing parenthesis, then these are probably # function arguments with extra "class" or "struct" keywords. # Also pop these stack for these. 
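        # Illustrative (hypothetical) lines for the two cases above:
        #   class Foo;             // forward declaration; ';' pops the stack
        #   void f(class Foo* p);  // ')' pops the spurious _ClassInfo entry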
if not self.SeenOpenBrace(): self.stack.pop() else: # token == '}' # Perform end of block checks and pop the stack. if self.stack: self.stack[-1].CheckEnd(filename, clean_lines, linenum, error) self.stack.pop() line = matched.group(2) def InnermostClass(self): """Get class info on the top of the stack. Returns: A _ClassInfo object if we are inside a class, or None otherwise. """ for i in range(len(self.stack), 0, -1): classinfo = self.stack[i - 1] if isinstance(classinfo, _ClassInfo): return classinfo return None def CheckCompletedBlocks(self, filename, error): """Checks that all classes and namespaces have been completely parsed. Call this when all lines in a file have been processed. Args: filename: The name of the current file. error: The function to call with any errors found. """ # Note: This test can result in false positives if #ifdef constructs # get in the way of brace matching. See the testBuildClass test in # cpplint_unittest.py for an example of this. for obj in self.stack: if isinstance(obj, _ClassInfo): error(filename, obj.starting_linenum, 'build/class', 5, 'Failed to find complete declaration of class %s' % obj.name) elif isinstance(obj, _NamespaceInfo): error(filename, obj.starting_linenum, 'build/namespaces', 5, 'Failed to find complete declaration of namespace %s' % obj.name) def CheckForNonStandardConstructs(filename, clean_lines, linenum, nesting_state, error): r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2. Complain about several constructs which gcc-2 accepts, but which are not standard C++. Warning about these in lint is one way to ease the transition to new compilers. - put storage class first (e.g. "static const" instead of "const static"). - "%lld" instead of %qd" in printf-type functions. - "%1$d" is non-standard in printf-type functions. - "\%" is an undefined character escape sequence. - text after #endif is not allowed. - invalid inner-style forward declaration. - >? and <? operators, and their >?= and <?= cousins. Additionally, check for constructor/destructor style violations and reference members, as it is very convenient to do so while checking for gcc-2 compliance. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. nesting_state: A _NestingState instance which maintains information about the current stack of nested blocks being parsed. error: A callable to which errors are reported, which takes 4 arguments: filename, line number, error level, and message """ # Remove comments from the line, but leave in strings for now. line = clean_lines.lines[linenum] if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line): error(filename, linenum, 'runtime/printf_format', 3, '%q in format strings is deprecated. Use %ll instead.') if Search(r'printf\s*\(.*".*%\d+\$', line): error(filename, linenum, 'runtime/printf_format', 2, '%N$ formats are unconventional. Try rewriting to avoid them.') # Remove escaped backslashes before looking for undefined escapes. line = line.replace('\\\\', '') if Search(r'("|\').*\\(%|\[|\(|{)', line): error(filename, linenum, 'build/printf_format', 3, '%, [, (, and { are undefined character escapes. Unescape them.') # For the rest, work with both comments and strings removed. 
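  # Illustrative (hypothetical) lines that the storage-class, #endif, and
  # forward-declaration checks below would flag:
  #   const static int kFoo = 0;  // storage class should come first
  #   #endif FOO                  // uncommented text after #endif
  #   class Outer::Inner;         // inner-style forward declaration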
line = clean_lines.elided[linenum] if Search(r'\b(const|volatile|void|char|short|int|long' r'|float|double|signed|unsigned' r'|schar|u?int8|u?int16|u?int32|u?int64)' r'\s+(register|static|extern|typedef)\b', line): error(filename, linenum, 'build/storage_class', 5, 'Storage class (static, extern, typedef, etc) should be first.') if Match(r'\s*#\s*endif\s*[^/\s]+', line): error(filename, linenum, 'build/endif_comment', 5, 'Uncommented text after #endif is non-standard. Use a comment.') if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line): error(filename, linenum, 'build/forward_decl', 5, 'Inner-style forward declarations are invalid. Remove this line.') if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?', line): error(filename, linenum, 'build/deprecated', 3, '>? and <? (max and min) operators are non-standard and deprecated.') if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line): # TODO(unknown): Could it be expanded safely to arbitrary references, # without triggering too many false positives? The first # attempt triggered 5 warnings for mostly benign code in the regtest, hence # the restriction. # Here's the original regexp, for the reference: # type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?' # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;' error(filename, linenum, 'runtime/member_string_references', 2, 'const string& members are dangerous. It is much better to use ' 'alternatives, such as pointers or simple constants.') # Everything else in this function operates on class declarations. # Return early if the top of the nesting stack is not a class, or if # the class head is not completed yet. classinfo = nesting_state.InnermostClass() if not classinfo or not classinfo.seen_open_brace: return # The class may have been declared with namespace or classname qualifiers. # The constructor and destructor will not have those qualifiers. base_classname = classinfo.name.split('::')[-1] # Look for single-argument constructors that aren't marked explicit. # Technically a valid construct, but against style. args = Match(r'\s+(?:inline\s+)?%s\s*\(([^,()]+)\)' % re.escape(base_classname), line) if (args and args.group(1) != 'void' and not Match(r'(const\s+)?%s(\s+const)?\s*(?:<\w+>\s*)?&' % re.escape(base_classname), args.group(1).strip())): error(filename, linenum, 'runtime/explicit', 5, 'Single-argument constructors should be marked explicit.') def CheckSpacingForFunctionCall(filename, line, linenum, error): """Checks for the correctness of various spacing around function calls. Args: filename: The name of the current file. line: The text of the line to check. linenum: The number of the line to check. error: The function to call with any errors found. """ # Since function calls often occur inside if/for/while/switch # expressions - which have their own, more liberal conventions - we # first see if we should be looking inside such an expression for a # function call, to which we can apply more strict standards. fncall = line # if there's no control flow construct, look at whole line for pattern in (r'\bif\s*\((.*)\)\s*{', r'\bfor\s*\((.*)\)\s*{', r'\bwhile\s*\((.*)\)\s*[{;]', r'\bswitch\s*\((.*)\)\s*{'): match = Search(pattern, line) if match: fncall = match.group(1) # look inside the parens for function calls break # Except in if/for/while/switch, there should never be space # immediately inside parens (eg "f( 3, 4 )"). We make an exception # for nested parens ( (a+b) + c ). Likewise, there should never be # a space before a ( when it's a function argument. 
I assume it's a # function argument when the char before the whitespace is legal in # a function name (alnum + _) and we're not starting a macro. Also ignore # pointers and references to arrays and functions coz they're too tricky: # we use a very simple way to recognize these: # " (something)(maybe-something)" or # " (something)(maybe-something," or # " (something)[something]" # Note that we assume the contents of [] to be short enough that # they'll never need to wrap. if ( # Ignore control structures. not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b', fncall) and # Ignore pointers/references to functions. not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and # Ignore pointers/references to arrays. not Search(r' \([^)]+\)\[[^\]]+\]', fncall)): if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call error(filename, linenum, 'whitespace/parens', 4, 'Extra space after ( in function call') elif Search(r'\(\s+(?!(\s*\\)|\()', fncall): error(filename, linenum, 'whitespace/parens', 2, 'Extra space after (') if (Search(r'\w\s+\(', fncall) and not Search(r'#\s*define|typedef', fncall) and not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall)): error(filename, linenum, 'whitespace/parens', 4, 'Extra space before ( in function call') # If the ) is followed only by a newline or a { + newline, assume it's # part of a control statement (if/while/etc), and don't complain if Search(r'[^)]\s+\)\s*[^{\s]', fncall): # If the closing parenthesis is preceded by only whitespaces, # try to give a more descriptive error message. if Search(r'^\s+\)', fncall): error(filename, linenum, 'whitespace/parens', 2, 'Closing ) should be moved to the previous line') else: error(filename, linenum, 'whitespace/parens', 2, 'Extra space before )') def IsBlankLine(line): """Returns true if the given line is blank. We consider a line to be blank if the line is empty or consists of only white spaces. Args: line: A line of a string. Returns: True, if the given line is blank. """ return not line or line.isspace() def CheckForFunctionLengths(filename, clean_lines, linenum, function_state, error): """Reports for long function bodies. For an overview why this is done, see: http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions Uses a simplistic algorithm assuming other style guidelines (especially spacing) are followed. Only checks unindented functions, so class members are unchecked. Trivial bodies are unchecked, so constructors with huge initializer lists may be missed. Blank/comment lines are not counted so as to avoid encouraging the removal of vertical space and comments just to get through a lint check. NOLINT *on the last line of a function* disables this check. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. function_state: Current function name and lines in body so far. error: The function to call with any errors found. """ lines = clean_lines.lines line = lines[linenum] raw = clean_lines.raw_lines raw_line = raw[linenum] joined_line = '' starting_func = False regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ... match_result = Match(regexp, line) if match_result: # If the name is all caps and underscores, figure it's a macro and # ignore it, unless it's TEST or TEST_F. 
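    # Illustrative (hypothetical) matches: "MY_MACRO(x)" is skipped as a
    # macro, while "TEST(FooTest, Bar) {" and "void MyClass::Run(" are
    # treated as potential function definitions.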
function_name = match_result.group(1).split()[-1] if function_name == 'TEST' or function_name == 'TEST_F' or ( not Match(r'[A-Z_]+$', function_name)): starting_func = True if starting_func: body_found = False for start_linenum in xrange(linenum, clean_lines.NumLines()): start_line = lines[start_linenum] joined_line += ' ' + start_line.lstrip() if Search(r'(;|})', start_line): # Declarations and trivial functions body_found = True break # ... ignore elif Search(r'{', start_line): body_found = True function = Search(r'((\w|:)*)\(', line).group(1) if Match(r'TEST', function): # Handle TEST... macros parameter_regexp = Search(r'(\(.*\))', joined_line) if parameter_regexp: # Ignore bad syntax function += parameter_regexp.group(1) else: function += '()' function_state.Begin(function) break if not body_found: # No body for the function (or evidence of a non-function) was found. error(filename, linenum, 'readability/fn_size', 5, 'Lint failed to find start of function body.') elif Match(r'^\}\s*$', line): # function end function_state.Check(error, filename, linenum) function_state.End() elif not Match(r'^\s*$', line): function_state.Count() # Count non-blank/non-comment lines. _RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?') def CheckComment(comment, filename, linenum, error): """Checks for common mistakes in TODO comments. Args: comment: The text of the comment from the line in question. filename: The name of the current file. linenum: The number of the line to check. error: The function to call with any errors found. """ match = _RE_PATTERN_TODO.match(comment) if match: # One whitespace is correct; zero whitespace is handled elsewhere. leading_whitespace = match.group(1) if len(leading_whitespace) > 1: error(filename, linenum, 'whitespace/todo', 2, 'Too many spaces before TODO') username = match.group(2) if not username: error(filename, linenum, 'readability/todo', 2, 'Missing username in TODO; it should look like ' '"// TODO(my_username): Stuff."') middle_whitespace = match.group(3) # Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison if middle_whitespace != ' ' and middle_whitespace != '': error(filename, linenum, 'whitespace/todo', 2, 'TODO(my_username) should be followed by a space') def CheckAccess(filename, clean_lines, linenum, nesting_state, error): """Checks for improper use of DISALLOW* macros. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. nesting_state: A _NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # get rid of comments and strings matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|' r'DISALLOW_EVIL_CONSTRUCTORS|' r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line) if not matched: return if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo): if nesting_state.stack[-1].access != 'private': error(filename, linenum, 'readability/constructors', 3, '%s must be in the private: section' % matched.group(1)) else: # Found DISALLOW* macro outside a class declaration, or perhaps it # was used inside a function when it should have been part of the # class declaration. We could issue a warning here, but it # probably resulted in a compiler error already. pass def FindNextMatchingAngleBracket(clean_lines, linenum, init_suffix): """Find the corresponding > to close a template. 
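  For example (an illustrative, hypothetical input): given the line
  "set<int> foo;" with init_suffix "int> foo;", the first operator found
  is ">", which empties the nesting stack, so the function returns True.
  Given "if (a<b) {" with init_suffix "b) {", the first operator is ")",
  so it returns False.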
Args: clean_lines: A CleansedLines instance containing the file. linenum: Current line number. init_suffix: Remainder of the current line after the initial <. Returns: True if a matching bracket exists. """ line = init_suffix nesting_stack = ['<'] while True: # Find the next operator that can tell us whether < is used as an # opening bracket or as a less-than operator. We only want to # warn on the latter case. # # We could also check all other operators and terminate the search # early, e.g. if we got something like this "a<b+c", the "<" is # most likely a less-than operator, but then we will get false # positives for default arguments and other template expressions. match = Search(r'^[^<>(),;\[\]]*([<>(),;\[\]])(.*)$', line) if match: # Found an operator, update nesting stack operator = match.group(1) line = match.group(2) if nesting_stack[-1] == '<': # Expecting closing angle bracket if operator in ('<', '(', '['): nesting_stack.append(operator) elif operator == '>': nesting_stack.pop() if not nesting_stack: # Found matching angle bracket return True elif operator == ',': # Got a comma after a bracket, this is most likely a template # argument. We have not seen a closing angle bracket yet, but # it's probably a few lines later if we look for it, so just # return early here. return True else: # Got some other operator. return False else: # Expecting closing parenthesis or closing bracket if operator in ('<', '(', '['): nesting_stack.append(operator) elif operator in (')', ']'): # We don't bother checking for matching () or []. If we got # something like (] or [), it would have been a syntax error. nesting_stack.pop() else: # Scan the next line linenum += 1 if linenum >= len(clean_lines.elided): break line = clean_lines.elided[linenum] # Exhausted all remaining lines and still no matching angle bracket. # Most likely the input was incomplete, otherwise we should have # seen a semicolon and returned early. return True def FindPreviousMatchingAngleBracket(clean_lines, linenum, init_prefix): """Find the corresponding < that started a template. Args: clean_lines: A CleansedLines instance containing the file. linenum: Current line number. init_prefix: Part of the current line before the initial >. Returns: True if a matching bracket exists. """ line = init_prefix nesting_stack = ['>'] while True: # Find the previous operator match = Search(r'^(.*)([<>(),;\[\]])[^<>(),;\[\]]*$', line) if match: # Found an operator, update nesting stack operator = match.group(2) line = match.group(1) if nesting_stack[-1] == '>': # Expecting opening angle bracket if operator in ('>', ')', ']'): nesting_stack.append(operator) elif operator == '<': nesting_stack.pop() if not nesting_stack: # Found matching angle bracket return True elif operator == ',': # Got a comma before a bracket, this is most likely a # template argument. The opening angle bracket is probably # there if we look for it, so just return early here. return True else: # Got some other operator. return False else: # Expecting opening parenthesis or opening bracket if operator in ('>', ')', ']'): nesting_stack.append(operator) elif operator in ('(', '['): nesting_stack.pop() else: # Scan the previous line linenum -= 1 if linenum < 0: break line = clean_lines.elided[linenum] # Exhausted all earlier lines and still no matching angle bracket. return False def CheckSpacing(filename, clean_lines, linenum, nesting_state, error): """Checks for the correctness of various spacing issues in the code. 
Things we check for: spaces around operators, spaces after if/for/while/switch, no spaces around parens in function calls, two spaces between code and comment, don't start a block with a blank line, don't end a function with a blank line, don't add a blank line after public/protected/private, don't have too many blank lines in a row. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. nesting_state: A _NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found. """ # Don't use "elided" lines here, otherwise we can't check commented lines. # Don't want to use "raw" either, because we don't want to check inside C++11 # raw strings, raw = clean_lines.lines_without_raw_strings line = raw[linenum] # Before nixing comments, check if the line is blank for no good # reason. This includes the first line after a block is opened, and # blank lines at the end of a function (ie, right before a line like '}' # # Skip all the blank line checks if we are immediately inside a # namespace body. In other words, don't issue blank line warnings # for this block: # namespace { # # } # # A warning about missing end of namespace comments will be issued instead. if IsBlankLine(line) and not nesting_state.InNamespaceBody(): elided = clean_lines.elided prev_line = elided[linenum - 1] prevbrace = prev_line.rfind('{') # TODO(unknown): Don't complain if line before blank line, and line after, # both start with alnums and are indented the same amount. # This ignores whitespace at the start of a namespace block # because those are not usually indented. if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1: # OK, we have a blank line at the start of a code block. Before we # complain, we check if it is an exception to the rule: The previous # non-empty line has the parameters of a function header that are indented # 4 spaces (because they did not fit in a 80 column line when placed on # the same line as the function name). We also check for the case where # the previous line is indented 6 spaces, which may happen when the # initializers of a constructor do not fit into a 80 column line. exception = False if Match(r' {6}\w', prev_line): # Initializer list? # We are looking for the opening column of initializer list, which # should be indented 4 spaces to cause 6 space indentation afterwards. search_position = linenum-2 while (search_position >= 0 and Match(r' {6}\w', elided[search_position])): search_position -= 1 exception = (search_position >= 0 and elided[search_position][:5] == ' :') else: # Search for the function arguments or an initializer list. We use a # simple heuristic here: If the line is indented 4 spaces; and we have a # closing paren, without the opening paren, followed by an opening brace # or colon (for initializer lists) we assume that it is the last line of # a function header. If we have a colon indented 4 spaces, it is an # initializer list. 
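        # Illustrative (hypothetical) layout matching this heuristic:
        #   void ReallyLongFunctionName(
        #       int argument) {
        #
        # prev_line is the 4-space-indented ") {" line, so the blank line
        # after it is not reported.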
exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)', prev_line) or Match(r' {4}:', prev_line)) if not exception: error(filename, linenum, 'whitespace/blank_line', 2, 'Redundant blank line at the start of a code block ' 'should be deleted.') # Ignore blank lines at the end of a block in a long if-else # chain, like this: # if (condition1) { # // Something followed by a blank line # # } else if (condition2) { # // Something else # } if linenum + 1 < clean_lines.NumLines(): next_line = raw[linenum + 1] if (next_line and Match(r'\s*}', next_line) and next_line.find('} else ') == -1): error(filename, linenum, 'whitespace/blank_line', 3, 'Redundant blank line at the end of a code block ' 'should be deleted.') matched = Match(r'\s*(public|protected|private):', prev_line) if matched: error(filename, linenum, 'whitespace/blank_line', 3, 'Do not leave a blank line after "%s:"' % matched.group(1)) # Next, we complain if there's a comment too near the text commentpos = line.find('//') if commentpos != -1: # Check if the // may be in quotes. If so, ignore it # Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison if (line.count('"', 0, commentpos) - line.count('\\"', 0, commentpos)) % 2 == 0: # not in quotes # Allow one space for new scopes, two spaces otherwise: if (not Match(r'^\s*{ //', line) and ((commentpos >= 1 and line[commentpos-1] not in string.whitespace) or (commentpos >= 2 and line[commentpos-2] not in string.whitespace))): error(filename, linenum, 'whitespace/comments', 2, 'At least two spaces is best between code and comments') # There should always be a space between the // and the comment commentend = commentpos + 2 if commentend < len(line) and not line[commentend] == ' ': # but some lines are exceptions -- e.g. if they're big # comment delimiters like: # //---------------------------------------------------------- # or are an empty C++ style Doxygen comment, like: # /// # or C++ style Doxygen comments placed after the variable: # ///< Header comment # //!< Header comment # or they begin with multiple slashes followed by a space: # //////// Header comment match = (Search(r'[=/-]{4,}\s*$', line[commentend:]) or Search(r'^/$', line[commentend:]) or Search(r'^!< ', line[commentend:]) or Search(r'^/< ', line[commentend:]) or Search(r'^/+ ', line[commentend:])) if not match: error(filename, linenum, 'whitespace/comments', 4, 'Should have a space between // and comment') CheckComment(line[commentpos:], filename, linenum, error) line = clean_lines.elided[linenum] # get rid of comments and strings # Don't try to do spacing checks for operator methods line = re.sub(r'operator(==|!=|<|<<|<=|>=|>>|>)\(', 'operator\(', line) # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )". # Otherwise not. Note we only check for non-spaces on *both* sides; # sometimes people put non-spaces on one side when aligning ='s among # many lines (not that this is behavior that I approve of...) if Search(r'[\w.]=[\w.]', line) and not Search(r'\b(if|while) ', line): error(filename, linenum, 'whitespace/operators', 4, 'Missing spaces around =') # It's ok not to have spaces around binary operators like + - * /, but if # there's too little whitespace, we get concerned. It's hard to tell, # though, so we punt on this one for now. TODO. # You should always have whitespace around binary operators. # # Check <= and >= first to avoid false positives with < and >, then # check non-include lines for spacing around < and >. 
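  # Illustrative (hypothetical) inputs: "if (a<=b)" is flagged just below,
  # while "if (a <= b)" passes, and "#include <map>" is exempted from the
  # single < and > checks by the include test.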
  match = Search(r'[^<>=!\s](==|!=|<=|>=)[^<>=!\s]', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around %s' % match.group(1))

  # We allow no-spaces around << when used like this: 10<<20, but
  # not otherwise (particularly, not when used as streams).
  # Also ignore using ns::operator<<;
  match = Search(r'(operator|\S)(?:L|UL|ULL|l|ul|ull)?<<(\S)', line)
  if (match and
      not (match.group(1).isdigit() and match.group(2).isdigit()) and
      not (match.group(1) == 'operator' and match.group(2) == ';')):
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around <<')
  elif not Match(r'#.*include', line):
    # Avoid false positives on ->
    reduced_line = line.replace('->', '')

    # Look for < that is not surrounded by spaces.  This is only
    # triggered if both sides are missing spaces, even though
    # technically we should flag if at least one side is missing a
    # space.  This is done to avoid some false positives with shifts.
    match = Search(r'[^\s<]<([^\s=<].*)', reduced_line)
    if (match and
        not FindNextMatchingAngleBracket(clean_lines, linenum,
                                         match.group(1))):
      error(filename, linenum, 'whitespace/operators', 3,
            'Missing spaces around <')

    # Look for > that is not surrounded by spaces.  Similar to the
    # above, we only trigger if both sides are missing spaces to avoid
    # false positives with shifts.
    match = Search(r'^(.*[^\s>])>[^\s=>]', reduced_line)
    if (match and
        not FindPreviousMatchingAngleBracket(clean_lines, linenum,
                                             match.group(1))):
      error(filename, linenum, 'whitespace/operators', 3,
            'Missing spaces around >')

  # We allow no-spaces around >> for almost anything.  This is because
  # C++11 allows ">>" to close nested templates, which accounts for
  # most cases when ">>" is not followed by a space.
  #
  # We still warn on ">>" followed by alpha character, because that is
  # likely due to ">>" being used for right shifts, e.g.:
  #   value >> alpha
  #
  # When ">>" is used to close templates, the alphanumeric letter that
  # follows would be part of an identifier, and there should still be
  # a space separating the template type and the identifier.
  #   type<type<type>> alpha
  match = Search(r'>>[a-zA-Z_]', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around >>')

  # There shouldn't be space around unary operators
  match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 4,
          'Extra space for operator %s' % match.group(1))

  # A pet peeve of mine: no spaces after an if, while, switch, or for
  match = Search(r' (if\(|for\(|while\(|switch\()', line)
  if match:
    error(filename, linenum, 'whitespace/parens', 5,
          'Missing space before ( in %s' % match.group(1))

  # For if/for/while/switch, the left and right parens should be
  # consistent about how many spaces are inside the parens, and
  # there should either be zero or one spaces inside the parens.
  # We don't want: "if ( foo)" or "if ( foo )".
  # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
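  # Illustrative (hypothetical) inputs for the check below: "if ( foo) {" is
  # flagged as mismatching, "if (  foo  ) {" has too many spaces inside the
  # parens, while "if (foo) {", "if ( foo ) {" and "for ( ; foo; bar) {" all
  # pass.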
  match = Search(r'\b(if|for|while|switch)\s*'
                 r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
                 line)
  if match:
    if len(match.group(2)) != len(match.group(4)):
      if not (match.group(3) == ';' and
              len(match.group(2)) == 1 + len(match.group(4)) or
              not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
        error(filename, linenum, 'whitespace/parens', 5,
              'Mismatching spaces inside () in %s' % match.group(1))
    if len(match.group(2)) not in [0, 1]:
      error(filename, linenum, 'whitespace/parens', 5,
            'Should have zero or one spaces inside ( and ) in %s' %
            match.group(1))

  # You should always have a space after a comma (either as fn arg or operator)
  #
  # This does not apply when the non-space character following the
  # comma is another comma, since the only time when that happens is
  # for empty macro arguments.
  #
  # We run this check in two passes: first pass on elided lines to
  # verify that lines contain missing whitespaces, second pass on raw
  # lines to confirm that those missing whitespaces are not due to
  # elided comments.
  if Search(r',[^,\s]', line) and Search(r',[^,\s]', raw[linenum]):
    error(filename, linenum, 'whitespace/comma', 3,
          'Missing space after ,')

  # You should always have a space after a semicolon
  # except for a few corner cases
  # TODO(unknown): clarify if 'if (1) { return 1;}' requires one more
  # space after ;
  if Search(r';[^\s};\\)/]', line):
    error(filename, linenum, 'whitespace/semicolon', 3,
          'Missing space after ;')

  # Next we will look for issues with function calls.
  CheckSpacingForFunctionCall(filename, line, linenum, error)

  # Except after an opening paren, or after another opening brace (in case of
  # an initializer list, for instance), you should have spaces before your
  # braces. And since you should never have braces at the beginning of a line,
  # this is an easy test.
  match = Match(r'^(.*[^ ({]){', line)
  if match:
    # Try a bit harder to check for brace initialization.  This
    # happens in one of the following forms:
    #   Constructor() : initializer_list_{} { ... }
    #   Constructor{}.MemberFunction()
    #   Type variable{};
    #   FunctionCall(type{}, ...);
    #   LastArgument(..., type{});
    #   LOG(INFO) << type{} << " ...";
    #   map_of_type[{...}] = ...;
    #
    # We check for the character following the closing brace, and
    # silence the warning if it's one of those listed above, i.e.
    # "{.;,)<]".
    #
    # To account for nested initializer list, we allow any number of
    # closing braces up to "{;,)<".  We can't simply silence the
    # warning on first sight of closing brace, because that would
    # cause false negatives for things that are not initializer lists.
    #   Silence this:         But not this:
    #     Outer{                if (...) {
    #       Inner{...}            if (...){  // Missing space before {
    #     };                    }
    #
    # There is a false negative with this approach if people inserted
    # spurious semicolons, e.g. "if (cond){};", but we will catch the
    # spurious semicolon with a separate check.
    (endline, endlinenum, endpos) = CloseExpression(
        clean_lines, linenum, len(match.group(1)))
    trailing_text = ''
    if endpos > -1:
      trailing_text = endline[endpos:]
    for offset in xrange(endlinenum + 1,
                         min(endlinenum + 3, clean_lines.NumLines() - 1)):
      trailing_text += clean_lines.elided[offset]
    if not Match(r'^[\s}]*[{.;,)<\]]', trailing_text):
      error(filename, linenum, 'whitespace/braces', 5,
            'Missing space before {')

  # Make sure '} else {' has spaces.
  if Search(r'}else', line):
    error(filename, linenum, 'whitespace/braces', 5,
          'Missing space before else')

  # You shouldn't have spaces before your brackets, except maybe after
  # 'delete []' or 'new char * []'.
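  # Illustrative (hypothetical) inputs: "int x [10];" is flagged below,
  # while "delete [] p;" is explicitly exempted.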
if Search(r'\w\s+\[', line) and not Search(r'delete\s+\[', line): error(filename, linenum, 'whitespace/braces', 5, 'Extra space before [') # You shouldn't have a space before a semicolon at the end of the line. # There's a special case for "for" since the style guide allows space before # the semicolon there. if Search(r':\s*;\s*$', line): error(filename, linenum, 'whitespace/semicolon', 5, 'Semicolon defining empty statement. Use {} instead.') elif Search(r'^\s*;\s*$', line): error(filename, linenum, 'whitespace/semicolon', 5, 'Line contains only semicolon. If this should be an empty statement, ' 'use {} instead.') elif (Search(r'\s+;\s*$', line) and not Search(r'\bfor\b', line)): error(filename, linenum, 'whitespace/semicolon', 5, 'Extra space before last semicolon. If this should be an empty ' 'statement, use {} instead.') # In range-based for, we wanted spaces before and after the colon, but # not around "::" tokens that might appear. if (Search('for *\(.*[^:]:[^: ]', line) or Search('for *\(.*[^: ]:[^:]', line)): error(filename, linenum, 'whitespace/forcolon', 2, 'Missing space around colon in range-based for loop') def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error): """Checks for additional blank line issues related to sections. Currently the only thing checked here is blank line before protected/private. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. class_info: A _ClassInfo objects. linenum: The number of the line to check. error: The function to call with any errors found. """ # Skip checks if the class is small, where small means 25 lines or less. # 25 lines seems like a good cutoff since that's the usual height of # terminals, and any class that can't fit in one screen can't really # be considered "small". # # Also skip checks if we are on the first line. This accounts for # classes that look like # class Foo { public: ... }; # # If we didn't find the end of the class, last_line would be zero, # and the check will be skipped by the first condition. if (class_info.last_line - class_info.starting_linenum <= 24 or linenum <= class_info.starting_linenum): return matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum]) if matched: # Issue warning if the line before public/protected/private was # not a blank line, but don't do this if the previous line contains # "class" or "struct". This can happen two ways: # - We are at the beginning of the class. # - We are forward-declaring an inner class that is semantically # private, but needed to be public for implementation reasons. # Also ignores cases where the previous line ends with a backslash as can be # common when defining classes in C macros. prev_line = clean_lines.lines[linenum - 1] if (not IsBlankLine(prev_line) and not Search(r'\b(class|struct)\b', prev_line) and not Search(r'\\$', prev_line)): # Try a bit harder to find the beginning of the class. This is to # account for multi-line base-specifier lists, e.g.: # class Derived # : public Base { end_class_head = class_info.starting_linenum for i in range(class_info.starting_linenum, linenum): if Search(r'\{\s*$', clean_lines.lines[i]): end_class_head = i break if end_class_head < linenum - 1: error(filename, linenum, 'whitespace/blank_line', 3, '"%s:" should be preceded by a blank line' % matched.group(1)) def GetPreviousNonBlankLine(clean_lines, linenum): """Return the most recent non-blank line and its line number. 
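  For example (illustrative): if line 9 reads "int foo;" and lines 10-11 are
  blank, GetPreviousNonBlankLine(clean_lines, 12) returns ('int foo;', 9).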
Args: clean_lines: A CleansedLines instance containing the file contents. linenum: The number of the line to check. Returns: A tuple with two elements. The first element is the contents of the last non-blank line before the current line, or the empty string if this is the first non-blank line. The second is the line number of that line, or -1 if this is the first non-blank line. """ prevlinenum = linenum - 1 while prevlinenum >= 0: prevline = clean_lines.elided[prevlinenum] if not IsBlankLine(prevline): # if not a blank line... return (prevline, prevlinenum) prevlinenum -= 1 return ('', -1) def CheckBraces(filename, clean_lines, linenum, error): """Looks for misplaced braces (e.g. at the end of line). Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # get rid of comments and strings if Match(r'\s*{\s*$', line): # We allow an open brace to start a line in the case where someone is using # braces in a block to explicitly create a new scope, which is commonly used # to control the lifetime of stack-allocated variables. Braces are also # used for brace initializers inside function calls. We don't detect this # perfectly: we just don't complain if the last non-whitespace character on # the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the # previous line starts a preprocessor block. prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] if (not Search(r'[,;:}{(]\s*$', prevline) and not Match(r'\s*#', prevline)): error(filename, linenum, 'whitespace/braces', 4, '{ should almost always be at the end of the previous line') # An else clause should be on the same line as the preceding closing brace. if Match(r'\s*else\s*', line): prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] if Match(r'\s*}\s*$', prevline): error(filename, linenum, 'whitespace/newline', 4, 'An else should appear on the same line as the preceding }') # If braces come on one side of an else, they should be on both. # However, we have to worry about "else if" that spans multiple lines! if Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line): if Search(r'}\s*else if([^{]*)$', line): # could be multi-line if # find the ( after the if pos = line.find('else if') pos = line.find('(', pos) if pos > 0: (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos) if endline[endpos:].find('{') == -1: # must be brace after if error(filename, linenum, 'readability/braces', 5, 'If an else has a brace on one side, it should have it on both') else: # common case: else not followed by a multi-line if error(filename, linenum, 'readability/braces', 5, 'If an else has a brace on one side, it should have it on both') # Likewise, an else should never have the else clause on the same line if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line): error(filename, linenum, 'whitespace/newline', 4, 'Else clause should never be on same line as else (use 2 lines)') # In the same way, a do/while should never be on one line if Match(r'\s*do [^\s{]', line): error(filename, linenum, 'whitespace/newline', 4, 'do/while clauses should not be on a single line') # Block bodies should not be followed by a semicolon. Due to C++11 # brace initialization, there are more places where semicolons are # required than not, so we use a whitelist approach to check these # rather than a blacklist. 
These are the places where "};" should # be replaced by just "}": # 1. Some flavor of block following closing parenthesis: # for (;;) {}; # while (...) {}; # switch (...) {}; # Function(...) {}; # if (...) {}; # if (...) else if (...) {}; # # 2. else block: # if (...) else {}; # # 3. const member function: # Function(...) const {}; # # 4. Block following some statement: # x = 42; # {}; # # 5. Block at the beginning of a function: # Function(...) { # {}; # } # # Note that naively checking for the preceding "{" will also match # braces inside multi-dimensional arrays, but this is fine since # that expression will not contain semicolons. # # 6. Block following another block: # while (true) {} # {}; # # 7. End of namespaces: # namespace {}; # # These semicolons seems far more common than other kinds of # redundant semicolons, possibly due to people converting classes # to namespaces. For now we do not warn for this case. # # Try matching case 1 first. match = Match(r'^(.*\)\s*)\{', line) if match: # Matched closing parenthesis (case 1). Check the token before the # matching opening parenthesis, and don't warn if it looks like a # macro. This avoids these false positives: # - macro that defines a base class # - multi-line macro that defines a base class # - macro that defines the whole class-head # # But we still issue warnings for macros that we know are safe to # warn, specifically: # - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P # - TYPED_TEST # - INTERFACE_DEF # - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED: # # We implement a whitelist of safe macros instead of a blacklist of # unsafe macros, even though the latter appears less frequently in # google code and would have been easier to implement. This is because # the downside for getting the whitelist wrong means some extra # semicolons, while the downside for getting the blacklist wrong # would result in compile errors. # # In addition to macros, we also don't want to warn on compound # literals. closing_brace_pos = match.group(1).rfind(')') opening_parenthesis = ReverseCloseExpression( clean_lines, linenum, closing_brace_pos) if opening_parenthesis[2] > -1: line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]] macro = Search(r'\b([A-Z_]+)\s*$', line_prefix) if ((macro and macro.group(1) not in ( 'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST', 'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED', 'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or Search(r'\s+=\s*$', line_prefix)): match = None else: # Try matching cases 2-3. match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line) if not match: # Try matching cases 4-6. These are always matched on separate lines. # # Note that we can't simply concatenate the previous line to the # current line and do a single match, otherwise we may output # duplicate warnings for the blank line case: # if (cond) { # // blank line # } prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] if prevline and Search(r'[;{}]\s*$', prevline): match = Match(r'^(\s*)\{', line) # Check matching closing brace if match: (endline, endlinenum, endpos) = CloseExpression( clean_lines, linenum, len(match.group(1))) if endpos > -1 and Match(r'^\s*;', endline[endpos:]): # Current {} pair is eligible for semicolon check, and we have found # the redundant semicolon, output warning here. 
# # Note: because we are scanning forward for opening braces, and # outputting warnings for the matching closing brace, if there are # nested blocks with trailing semicolons, we will get the error # messages in reversed order. error(filename, endlinenum, 'readability/braces', 4, "You don't need a ; after a }") def CheckEmptyBlockBody(filename, clean_lines, linenum, error): """Look for empty loop/conditional body with only a single semicolon. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ # Search for loop keywords at the beginning of the line. Because only # whitespaces are allowed before the keywords, this will also ignore most # do-while-loops, since those lines should start with closing brace. # # We also check "if" blocks here, since an empty conditional block # is likely an error. line = clean_lines.elided[linenum] matched = Match(r'\s*(for|while|if)\s*\(', line) if matched: # Find the end of the conditional expression (end_line, end_linenum, end_pos) = CloseExpression( clean_lines, linenum, line.find('(')) # Output warning if what follows the condition expression is a semicolon. # No warning for all other cases, including whitespace or newline, since we # have a separate check for semicolons preceded by whitespace. if end_pos >= 0 and Match(r';', end_line[end_pos:]): if matched.group(1) == 'if': error(filename, end_linenum, 'whitespace/empty_conditional_body', 5, 'Empty conditional bodies should use {}') else: error(filename, end_linenum, 'whitespace/empty_loop_body', 5, 'Empty loop bodies should use {} or continue') def CheckCheck(filename, clean_lines, linenum, error): """Checks the use of CHECK and EXPECT macros. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ # Decide the set of replacement macros that should be suggested lines = clean_lines.elided check_macro = None start_pos = -1 for macro in _CHECK_MACROS: i = lines[linenum].find(macro) if i >= 0: check_macro = macro # Find opening parenthesis. Do a regular expression match here # to make sure that we are matching the expected CHECK macro, as # opposed to some other macro that happens to contain the CHECK # substring. matched = Match(r'^(.*\b' + check_macro + r'\s*)\(', lines[linenum]) if not matched: continue start_pos = len(matched.group(1)) break if not check_macro or start_pos < 0: # Don't waste time here if line doesn't contain 'CHECK' or 'EXPECT' return # Find end of the boolean expression by matching parentheses (last_line, end_line, end_pos) = CloseExpression( clean_lines, linenum, start_pos) if end_pos < 0: return if linenum == end_line: expression = lines[linenum][start_pos + 1:end_pos - 1] else: expression = lines[linenum][start_pos + 1:] for i in xrange(linenum + 1, end_line): expression += lines[i] expression += last_line[0:end_pos - 1] # Parse expression so that we can take parentheses into account. # This avoids false positives for inputs like "CHECK((a < 4) == b)", # which is not replaceable by CHECK_LE. 
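  # Illustrative (hypothetical) outcomes of the parsing below:
  #   CHECK(x == 42)       -> lhs "x", operator "==", rhs "42":
  #                           suggests CHECK_EQ
  #   CHECK(a < 4 && b)    -> rhs contains "&&", so no suggestion is made
  #   CHECK((a < 4) == b)  -> "(a < 4)" is consumed as a single operand, so
  #                           the operator seen is "==", not "<"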
lhs = '' rhs = '' operator = None while expression: matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||' r'==|!=|>=|>|<=|<|\()(.*)$', expression) if matched: token = matched.group(1) if token == '(': # Parenthesized operand expression = matched.group(2) (end, _) = FindEndOfExpressionInLine(expression, 0, 1, '(', ')') if end < 0: return # Unmatched parenthesis lhs += '(' + expression[0:end] expression = expression[end:] elif token in ('&&', '||'): # Logical and/or operators. This means the expression # contains more than one term, for example: # CHECK(42 < a && a < b); # # These are not replaceable with CHECK_LE, so bail out early. return elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'): # Non-relational operator lhs += token expression = matched.group(2) else: # Relational operator operator = token rhs = matched.group(2) break else: # Unparenthesized operand. Instead of appending to lhs one character # at a time, we do another regular expression match to consume several # characters at once if possible. Trivial benchmark shows that this # is more efficient when the operands are longer than a single # character, which is generally the case. matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression) if not matched: matched = Match(r'^(\s*\S)(.*)$', expression) if not matched: break lhs += matched.group(1) expression = matched.group(2) # Only apply checks if we got all parts of the boolean expression if not (lhs and operator and rhs): return # Check that rhs do not contain logical operators. We already know # that lhs is fine since the loop above parses out && and ||. if rhs.find('&&') > -1 or rhs.find('||') > -1: return # At least one of the operands must be a constant literal. This is # to avoid suggesting replacements for unprintable things like # CHECK(variable != iterator) # # The following pattern matches decimal, hex integers, strings, and # characters (in that order). lhs = lhs.strip() rhs = rhs.strip() match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$' if Match(match_constant, lhs) or Match(match_constant, rhs): # Note: since we know both lhs and rhs, we can provide a more # descriptive error message like: # Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42) # Instead of: # Consider using CHECK_EQ instead of CHECK(a == b) # # We are still keeping the less descriptive message because if lhs # or rhs gets long, the error message might become unreadable. error(filename, linenum, 'readability/check', 2, 'Consider using %s instead of %s(a %s b)' % ( _CHECK_REPLACEMENT[check_macro][operator], check_macro, operator)) def CheckAltTokens(filename, clean_lines, linenum, error): """Check alternative keywords being used in boolean expressions. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Avoid preprocessor lines if Match(r'^\s*#', line): return # Last ditch effort to avoid multi-line comments. This will not help # if the comment started before the current line or ended after the # current line, but it catches most of the false positives. At least, # it provides a way to workaround this warning for people who use # multi-line comments in preprocessor macros. # # TODO(unknown): remove this once cpplint has better support for # multi-line comments. 
if line.find('/*') >= 0 or line.find('*/') >= 0: return for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line): error(filename, linenum, 'readability/alt_tokens', 2, 'Use operator %s instead of %s' % ( _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1))) def GetLineWidth(line): """Determines the width of the line in column positions. Args: line: A string, which may be a Unicode string. Returns: The width of the line in column positions, accounting for Unicode combining characters and wide characters. """ if isinstance(line, unicode): width = 0 for uc in unicodedata.normalize('NFC', line): if unicodedata.east_asian_width(uc) in ('W', 'F'): width += 2 elif not unicodedata.combining(uc): width += 1 return width else: return len(line) def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state, error): """Checks rules from the 'C++ style rules' section of cppguide.html. Most of these rules are hard to test (naming, comment style), but we do what we can. In particular we check for 2-space indents, line lengths, tab usage, spaces inside code, etc. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. file_extension: The extension (without the dot) of the filename. nesting_state: A _NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found. """ # Don't use "elided" lines here, otherwise we can't check commented lines. # Don't want to use "raw" either, because we don't want to check inside C++11 # raw strings, raw_lines = clean_lines.lines_without_raw_strings line = raw_lines[linenum] if line.find('\t') != -1: error(filename, linenum, 'whitespace/tab', 1, 'Tab found; better to use spaces') # One or three blank spaces at the beginning of the line is weird; it's # hard to reconcile that with 2-space indents. # NOTE: here are the conditions rob pike used for his tests. Mine aren't # as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces # if(RLENGTH > 20) complain = 0; # if(match($0, " +(error|private|public|protected):")) complain = 0; # if(match(prev, "&& *$")) complain = 0; # if(match(prev, "\\|\\| *$")) complain = 0; # if(match(prev, "[\",=><] *$")) complain = 0; # if(match($0, " <<")) complain = 0; # if(match(prev, " +for \\(")) complain = 0; # if(prevodd && match(prevprev, " +for \\(")) complain = 0; initial_spaces = 0 cleansed_line = clean_lines.elided[linenum] while initial_spaces < len(line) and line[initial_spaces] == ' ': initial_spaces += 1 if line and line[-1].isspace(): error(filename, linenum, 'whitespace/end_of_line', 4, 'Line ends in whitespace. Consider deleting these extra spaces.') # There are certain situations we allow one space, notably for section labels elif ((initial_spaces == 1 or initial_spaces == 3) and not Match(r'\s*\w+\s*:\s*$', cleansed_line)): error(filename, linenum, 'whitespace/indent', 3, 'Weird number of spaces at line-start. ' 'Are you using a 2-space indent?') # Check if the line is a header guard. is_header_guard = False if file_extension == 'h': cppvar = GetHeaderGuardCPPVariable(filename) if (line.startswith('#ifndef %s' % cppvar) or line.startswith('#define %s' % cppvar) or line.startswith('#endif // %s' % cppvar)): is_header_guard = True # #include lines and header guards can be long, since there's no clean way to # split them. # # URLs can be long too. It's possible to split these, but it makes them # harder to cut&paste. 
# # The "$Id:...$" comment may also get very long without it being the # developers fault. if (not line.startswith('#include') and not is_header_guard and not Match(r'^\s*//.*http(s?)://\S*$', line) and not Match(r'^// \$Id:.*#[0-9]+ \$$', line)): line_width = GetLineWidth(line) extended_length = int((_line_length * 1.25)) if line_width > extended_length: error(filename, linenum, 'whitespace/line_length', 4, 'Lines should very rarely be longer than %i characters' % extended_length) elif line_width > _line_length: error(filename, linenum, 'whitespace/line_length', 2, 'Lines should be <= %i characters long' % _line_length) if (cleansed_line.count(';') > 1 and # for loops are allowed two ;'s (and may run over two lines). cleansed_line.find('for') == -1 and (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and # It's ok to have many commands in a switch case that fits in 1 line not ((cleansed_line.find('case ') != -1 or cleansed_line.find('default:') != -1) and cleansed_line.find('break;') != -1)): error(filename, linenum, 'whitespace/newline', 0, 'More than one command on the same line') # Some more style checks CheckBraces(filename, clean_lines, linenum, error) CheckEmptyBlockBody(filename, clean_lines, linenum, error) CheckAccess(filename, clean_lines, linenum, nesting_state, error) CheckSpacing(filename, clean_lines, linenum, nesting_state, error) CheckCheck(filename, clean_lines, linenum, error) CheckAltTokens(filename, clean_lines, linenum, error) classinfo = nesting_state.InnermostClass() if classinfo: CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error) _RE_PATTERN_INCLUDE_NEW_STYLE = re.compile(r'#include +"[^/]+\.h"') _RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$') # Matches the first component of a filename delimited by -s and _s. That is: # _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo' # _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo' # _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo' # _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo' _RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+') def _DropCommonSuffixes(filename): """Drops common suffixes like _test.cc or -inl.h from filename. For example: >>> _DropCommonSuffixes('foo/foo-inl.h') 'foo/foo' >>> _DropCommonSuffixes('foo/bar/foo.cc') 'foo/bar/foo' >>> _DropCommonSuffixes('foo/foo_internal.h') 'foo/foo' >>> _DropCommonSuffixes('foo/foo_unusualinternal.h') 'foo/foo_unusualinternal' Args: filename: The input filename. Returns: The filename with the common suffix removed. """ for suffix in ('test.cc', 'regtest.cc', 'unittest.cc', 'inl.h', 'impl.h', 'internal.h'): if (filename.endswith(suffix) and len(filename) > len(suffix) and filename[-len(suffix) - 1] in ('-', '_')): return filename[:-len(suffix) - 1] return os.path.splitext(filename)[0] def _IsTestFilename(filename): """Determines if the given filename has a suffix that identifies it as a test. Args: filename: The input filename. Returns: True if 'filename' looks like a test, False otherwise. """ if (filename.endswith('_test.cc') or filename.endswith('_unittest.cc') or filename.endswith('_regtest.cc')): return True else: return False def _ClassifyInclude(fileinfo, include, is_system): """Figures out what kind of header 'include' is. Args: fileinfo: The current file cpplint is running over. A FileInfo instance. include: The path to a #included file. is_system: True if the #include used <> rather than "". 
Returns: One of the _XXX_HEADER constants. For example: >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True) _C_SYS_HEADER >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True) _CPP_SYS_HEADER >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False) _LIKELY_MY_HEADER >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'), ... 'bar/foo_other_ext.h', False) _POSSIBLE_MY_HEADER >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False) _OTHER_HEADER """ # This is a list of all standard c++ header files, except # those already checked for above. is_cpp_h = include in _CPP_HEADERS if is_system: if is_cpp_h: return _CPP_SYS_HEADER else: return _C_SYS_HEADER # If the target file and the include we're checking share a # basename when we drop common extensions, and the include # lives in . , then it's likely to be owned by the target file. target_dir, target_base = ( os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName()))) include_dir, include_base = os.path.split(_DropCommonSuffixes(include)) if target_base == include_base and ( include_dir == target_dir or include_dir == os.path.normpath(target_dir + '/../public')): return _LIKELY_MY_HEADER # If the target and include share some initial basename # component, it's possible the target is implementing the # include, so it's allowed to be first, but we'll never # complain if it's not there. target_first_component = _RE_FIRST_COMPONENT.match(target_base) include_first_component = _RE_FIRST_COMPONENT.match(include_base) if (target_first_component and include_first_component and target_first_component.group(0) == include_first_component.group(0)): return _POSSIBLE_MY_HEADER return _OTHER_HEADER def CheckIncludeLine(filename, clean_lines, linenum, include_state, error): """Check rules that are applicable to #include lines. Strings on #include lines are NOT removed from elided line, to make certain tasks easier. However, to prevent false positives, checks applicable to #include lines in CheckLanguage must be put here. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. include_state: An _IncludeState instance in which the headers are inserted. error: The function to call with any errors found. """ fileinfo = FileInfo(filename) line = clean_lines.lines[linenum] # "include" should use the new style "foo/bar.h" instead of just "bar.h" if _RE_PATTERN_INCLUDE_NEW_STYLE.search(line): error(filename, linenum, 'build/include_dir', 4, 'Include the directory when naming .h files') # we shouldn't include a file more than once. actually, there are a # handful of instances where doing so is okay, but in general it's # not. match = _RE_PATTERN_INCLUDE.search(line) if match: include = match.group(2) is_system = (match.group(1) == '<') if include in include_state: error(filename, linenum, 'build/include', 4, '"%s" already included at %s:%s' % (include, filename, include_state[include])) else: include_state[include] = linenum # We want to ensure that headers appear in the right order: # 1) for foo.cc, foo.h (preferred location) # 2) c system files # 3) cpp system files # 4) for foo.cc, foo.h (deprecated location) # 5) other google headers # # We classify each include statement as one of those 5 types # using a number of techniques. The include_state object keeps # track of the highest type seen, and complains if we see a # lower type after that. 
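      # A sketch of the expected order for foo.cc (names are illustrative):
      #   #include "foo/foo.h"   // 1) the header this file implements
      #   #include <stdio.h>     // 2) C system headers
      #   #include <string>      // 3) C++ system headers
      #   #include "bar/baz.h"   // 5) other headers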
      error_message = include_state.CheckNextIncludeOrder(
          _ClassifyInclude(fileinfo, include, is_system))
      if error_message:
        error(filename, linenum, 'build/include_order', 4,
              '%s. Should be: %s.h, c system, c++ system, other.' %
              (error_message, fileinfo.BaseName()))
      canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
      if not include_state.IsInAlphabeticalOrder(
          clean_lines, linenum, canonical_include):
        error(filename, linenum, 'build/include_alpha', 4,
              'Include "%s" not in alphabetical order' % include)
      include_state.SetLastHeader(canonical_include)

  # Look for any of the stream classes that are part of standard C++.
  match = _RE_PATTERN_INCLUDE.match(line)
  if match:
    include = match.group(2)
    if Match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include):
      # Many unit tests use cout, so we exempt them.
      if not _IsTestFilename(filename):
        error(filename, linenum, 'readability/streams', 3,
              'Streams are highly discouraged.')


def _GetTextInside(text, start_pattern):
  r"""Retrieves all the text between matching open and close parentheses.

  Given a string of lines and a regular expression string, retrieve all the
  text following the expression and between opening punctuation symbols like
  (, [, or {, and the matching close-punctuation symbol. This properly handles
  nested occurrences of the punctuation symbols, so for the text like
    printf(a(), b(c()));
  a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
  start_pattern must match a string that ends with an opening punctuation
  symbol.

  Args:
    text: The lines to extract text from. Its comments and strings must be
          elided. It can be a single line and can span multiple lines.
    start_pattern: The regexp string indicating where to start extracting
                   the text.
  Returns:
    The extracted text.
    None if either the opening string or ending punctuation could not be found.
  """
  # TODO(sugawarayu): Audit cpplint.py to see what places could be profitably
  # rewritten to use _GetTextInside (and use inferior regexp matching today).

  # Give opening punctuation marks to get the matching closing ones.
  matching_punctuation = {'(': ')', '{': '}', '[': ']'}
  closing_punctuation = set(matching_punctuation.itervalues())

  # Find the position to start extracting text.
  match = re.search(start_pattern, text, re.M)
  if not match:  # start_pattern not found in text.
    return None
  start_position = match.end(0)

  assert start_position > 0, (
      'start_pattern must end with an opening punctuation symbol.')
  assert text[start_position - 1] in matching_punctuation, (
      'start_pattern must end with an opening punctuation symbol.')
  # Stack of closing punctuation marks we expect to have in text after
  # position.
  punctuation_stack = [matching_punctuation[text[start_position - 1]]]
  position = start_position
  while punctuation_stack and position < len(text):
    if text[position] == punctuation_stack[-1]:
      punctuation_stack.pop()
    elif text[position] in closing_punctuation:
      # A closing punctuation mark without a matching opening one.
      return None
    elif text[position] in matching_punctuation:
      punctuation_stack.append(matching_punctuation[text[position]])
    position += 1
  if punctuation_stack:
    # Opening punctuation marks left without matching closing ones.
    return None
  # All punctuation marks match.
  return text[start_position:position - 1]


# Patterns for matching call-by-reference parameters.
# # Supports nested templates up to 2 levels deep using this messy pattern: # < (?: < (?: < [^<>]* # > # | [^<>] )* # > # | [^<>] )* # > _RE_PATTERN_IDENT = r'[_a-zA-Z]\w*' # =~ [[:alpha:]][[:alnum:]]* _RE_PATTERN_TYPE = ( r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?' r'(?:\w|' r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|' r'::)+') # A call-by-reference parameter ends with '& identifier'. _RE_PATTERN_REF_PARAM = re.compile( r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*' r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]') # A call-by-const-reference parameter either ends with 'const& identifier' # or looks like 'const type& identifier' when 'type' is atomic. _RE_PATTERN_CONST_REF_PARAM = ( r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT + r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')') def CheckLanguage(filename, clean_lines, linenum, file_extension, include_state, nesting_state, error): """Checks rules from the 'C++ language rules' section of cppguide.html. Some of these rules are hard to test (function overloading, using uint32 inappropriately), but we do the best we can. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. file_extension: The extension (without the dot) of the filename. include_state: An _IncludeState instance in which the headers are inserted. nesting_state: A _NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found. """ # If the line is empty or consists of entirely a comment, no need to # check it. line = clean_lines.elided[linenum] if not line: return match = _RE_PATTERN_INCLUDE.search(line) if match: CheckIncludeLine(filename, clean_lines, linenum, include_state, error) return # Reset include state across preprocessor directives. This is meant # to silence warnings for conditional includes. if Match(r'^\s*#\s*(?:ifdef|elif|else|endif)\b', line): include_state.ResetSection() # Make Windows paths like Unix. fullname = os.path.abspath(filename).replace('\\', '/') # TODO(unknown): figure out if they're using default arguments in fn proto. # Check to see if they're using an conversion function cast. # I just try to capture the most common basic types, though there are more. # Parameterless conversion functions, such as bool(), are allowed as they are # probably a member operator declaration or default constructor. match = Search( r'(\bnew\s+)?\b' # Grab 'new' operator, if it's there r'(int|float|double|bool|char|int32|uint32|int64|uint64)' r'(\([^)].*)', line) if match: matched_new = match.group(1) matched_type = match.group(2) matched_funcptr = match.group(3) # gMock methods are defined using some variant of MOCK_METHODx(name, type) # where type may be float(), int(string), etc. Without context they are # virtually indistinguishable from int(x) casts. Likewise, gMock's # MockCallback takes a template parameter of the form return_type(arg_type), # which looks much like the cast we're trying to detect. # # std::function<> wrapper has a similar problem. # # Return types for function pointers also look like casts if they # don't have an extra space. 
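    # Illustrative (names assumed): a plain "int(x)" is flagged as a
    # deprecated cast, while look-alikes such as "MOCK_METHOD0(GetX, int())"
    # and "std::function<int(int)>" are filtered out by the checks below.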
if (matched_new is None and # If new operator, then this isn't a cast not (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or Search(r'\bMockCallback<.*>', line) or Search(r'\bstd::function<.*>', line)) and not (matched_funcptr and Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(', matched_funcptr))): # Try a bit harder to catch gmock lines: the only place where # something looks like an old-style cast is where we declare the # return type of the mocked method, and the only time when we # are missing context is if MOCK_METHOD was split across # multiple lines. The missing MOCK_METHOD is usually one or two # lines back, so scan back one or two lines. # # It's not possible for gmock macros to appear in the first 2 # lines, since the class head + section name takes up 2 lines. if (linenum < 2 or not (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$', clean_lines.elided[linenum - 1]) or Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$', clean_lines.elided[linenum - 2]))): error(filename, linenum, 'readability/casting', 4, 'Using deprecated casting style. ' 'Use static_cast<%s>(...) instead' % matched_type) CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum], 'static_cast', r'\((int|float|double|bool|char|u?int(16|32|64))\)', error) # This doesn't catch all cases. Consider (const char * const)"hello". # # (char *) "foo" should always be a const_cast (reinterpret_cast won't # compile). if CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum], 'const_cast', r'\((char\s?\*+\s?)\)\s*"', error): pass else: # Check pointer casts for other than string constants CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum], 'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error) # In addition, we look for people taking the address of a cast. This # is dangerous -- casts can assign to temporaries, so the pointer doesn't # point where you think. match = Search( r'(?:&\(([^)]+)\)[\w(])|' r'(?:&(static|dynamic|down|reinterpret)_cast\b)', line) if match and match.group(1) != '*': error(filename, linenum, 'runtime/casting', 4, ('Are you taking an address of a cast? ' 'This is dangerous: could be a temp var. ' 'Take the address before doing the cast, rather than after')) # Create an extended_line, which is the concatenation of the current and # next lines, for more effective checking of code that may span more than one # line. if linenum + 1 < clean_lines.NumLines(): extended_line = line + clean_lines.elided[linenum + 1] else: extended_line = line # Check for people declaring static/global STL strings at the top level. # This is dangerous because the C++ language does not guarantee that # globals with constructors are initialized before the first access. match = Match( r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)', line) # Make sure it's not a function. # Function template specialization looks like: "string foo<Type>(...". # Class template definitions look like: "string Foo<Type>::Method(...". # # Also ignore things that look like operators. These are matched separately # because operator names cross non-word boundaries. If we change the pattern # above, we would decrease the accuracy of matching identifiers. if (match and not Search(r'\boperator\W', line) and not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)?\s*\(([^"]|$)', match.group(3))): error(filename, linenum, 'runtime/string', 4, 'For a static/global string constant, use a C style string instead: ' '"%schar %s[]".' 
% (match.group(1), match.group(2))) if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line): error(filename, linenum, 'runtime/init', 4, 'You seem to be initializing a member variable with itself.') if file_extension == 'h': # TODO(unknown): check that 1-arg constructors are explicit. # How to tell it's a constructor? # (handled in CheckForNonStandardConstructs for now) # TODO(unknown): check that classes have DISALLOW_EVIL_CONSTRUCTORS # (level 1 error) pass # Check if people are using the verboten C basic types. The only exception # we regularly allow is "unsigned short port" for port. if Search(r'\bshort port\b', line): if not Search(r'\bunsigned short port\b', line): error(filename, linenum, 'runtime/int', 4, 'Use "unsigned short" for ports, not "short"') else: match = Search(r'\b(short|long(?! +double)|long long)\b', line) if match: error(filename, linenum, 'runtime/int', 4, 'Use int16/int64/etc, rather than the C type %s' % match.group(1)) # When snprintf is used, the second argument shouldn't be a literal. match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line) if match and match.group(2) != '0': # If 2nd arg is zero, snprintf is used to calculate size. error(filename, linenum, 'runtime/printf', 3, 'If you can, use sizeof(%s) instead of %s as the 2nd arg ' 'to snprintf.' % (match.group(1), match.group(2))) # Check if some verboten C functions are being used. if Search(r'\bsprintf\b', line): error(filename, linenum, 'runtime/printf', 5, 'Never use sprintf. Use snprintf instead.') match = Search(r'\b(strcpy|strcat)\b', line) if match: error(filename, linenum, 'runtime/printf', 4, 'Almost always, snprintf is better than %s' % match.group(1)) # Check if some verboten operator overloading is going on
  # TODO(unknown): catch out-of-line unary operator&:
# class X {}; # int operator&(const X& x) { return 42; } // unary operator& # The trick is it's hard to tell apart from binary operator&: # class Y { int operator&(const Y& x) { return 23; } }; // binary operator& if Search(r'\boperator\s*&\s*\(\s*\)', line): error(filename, linenum, 'runtime/operator', 4, 'Unary operator& is dangerous. Do not use it.') # Check for suspicious usage of "if" like # } if (a == b) { if Search(r'\}\s*if\s*\(', line): error(filename, linenum, 'readability/braces', 4, 'Did you mean "else if"? If not, start a new line for "if".') # Check for potential format string bugs like printf(foo). # We constrain the pattern not to pick things like DocidForPrintf(foo). # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str()) # TODO(sugawarayu): Catch the following case. Need to change the calling # convention of the whole function to process multiple line to handle it. # printf( # boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line); printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(') if printf_args: match = Match(r'([\w.\->()]+)$', printf_args) if match and match.group(1) != '__VA_ARGS__': function_name = re.search(r'\b((?:string)?printf)\s*\(', line, re.I).group(1) error(filename, linenum, 'runtime/printf', 4, 'Potential format string bug. Do %s("%%s", %s) instead.' % (function_name, match.group(1))) # Check for potential memset bugs like memset(buf, sizeof(buf), 0). match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line) if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)): error(filename, linenum, 'runtime/memset', 4, 'Did you mean "memset(%s, 0, %s)"?' % (match.group(1), match.group(2))) if Search(r'\busing namespace\b', line): error(filename, linenum, 'build/namespaces', 5, 'Do not use namespace using-directives. ' 'Use using-declarations instead.') # Detect variable-length arrays. match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line) if (match and match.group(2) != 'return' and match.group(2) != 'delete' and match.group(3).find(']') == -1): # Split the size using space and arithmetic operators as delimiters. # If any of the resulting tokens are not compile time constants then # report the error. tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3)) is_const = True skip_next = False for tok in tokens: if skip_next: skip_next = False continue if Search(r'sizeof\(.+\)', tok): continue if Search(r'arraysize\(\w+\)', tok): continue tok = tok.lstrip('(') tok = tok.rstrip(')') if not tok: continue if Match(r'\d+', tok): continue if Match(r'0[xX][0-9a-fA-F]+', tok): continue if Match(r'k[A-Z0-9]\w*', tok): continue if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue # A catch all for tricky sizeof cases, including 'sizeof expression', # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)' # requires skipping the next token because we split on ' ' and '*'. if tok.startswith('sizeof'): skip_next = True continue is_const = False break if not is_const: error(filename, linenum, 'runtime/arrays', 1, 'Do not use variable-length arrays. Use an appropriately named ' "('k' followed by CamelCase) compile-time constant for the size.") # If DISALLOW_EVIL_CONSTRUCTORS, DISALLOW_COPY_AND_ASSIGN, or # DISALLOW_IMPLICIT_CONSTRUCTORS is present, then it should be the last thing # in the class declaration. 
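  # A sketch of the accepted layout (class name is illustrative):
  #   class Foo {
  #     ...
  #     DISALLOW_COPY_AND_ASSIGN(Foo);
  #   };  // no members may follow the macro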
match = Match( (r'\s*' r'(DISALLOW_(EVIL_CONSTRUCTORS|COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))' r'\(.*\);$'), line) if match and linenum + 1 < clean_lines.NumLines(): next_line = clean_lines.elided[linenum + 1] # We allow some, but not all, declarations of variables to be present # in the statement that defines the class. The [\w\*,\s]* fragment of # the regular expression below allows users to declare instances of # the class or pointers to instances, but not less common types such # as function pointers or arrays. It's a tradeoff between allowing # reasonable code and avoiding trying to parse more C++ using regexps. if not Search(r'^\s*}[\w\*,\s]*;', next_line): error(filename, linenum, 'readability/constructors', 3, match.group(1) + ' should be the last thing in the class') # Check for use of unnamed namespaces in header files. Registration # macros are typically OK, so we allow use of "namespace {" on lines # that end with backslashes. if (file_extension == 'h' and Search(r'\bnamespace\s*{', line) and line[-1] != '\\'): error(filename, linenum, 'build/namespaces', 4, 'Do not use unnamed namespaces in header files. See ' 'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces' ' for more information.') def CheckForNonConstReference(filename, clean_lines, linenum, nesting_state, error): """Check for non-const references. Separate from CheckLanguage since it scans backwards from current line, instead of scanning forward. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. nesting_state: A _NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found. """ # Do nothing if there is no '&' on current line. line = clean_lines.elided[linenum] if '&' not in line: return # Long type names may be broken across multiple lines, usually in one # of these forms: # LongType # ::LongTypeContinued &identifier # LongType:: # LongTypeContinued &identifier # LongType< # ...>::LongTypeContinued &identifier # # If we detected a type split across two lines, join the previous # line to current line so that we can match const references # accordingly. # # Note that this only scans back one line, since scanning back # arbitrary number of lines would be expensive. If you have a type # that spans more than 2 lines, please use a typedef. if linenum > 1: previous = None if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line): # previous_line\n + ::current_line previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$', clean_lines.elided[linenum - 1]) elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line): # previous_line::\n + current_line previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$', clean_lines.elided[linenum - 1]) if previous: line = previous.group(1) + line.lstrip() else: # Check for templated parameter that is split across multiple lines endpos = line.rfind('>') if endpos > -1: (_, startline, startpos) = ReverseCloseExpression( clean_lines, linenum, endpos) if startpos > -1 and startline < linenum: # Found the matching < on an earlier line, collect all # pieces up to current line. line = '' for i in xrange(startline, linenum + 1): line += clean_lines.elided[i].strip() # Check for non-const references in function parameters. 
A single '&' may
  # be found in the following places:
  #   inside expression: binary & for bitwise AND
  #   inside expression: unary & for taking the address of something
  #   inside declarators: reference parameter
  # We will exclude the first two cases by checking that we are not inside a
  # function body, including one that was just introduced by a trailing '{'.
  # TODO(unknown): Doesn't account for preprocessor directives.
  # TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
  check_params = False
  if not nesting_state.stack:
    check_params = True  # top level
  elif (isinstance(nesting_state.stack[-1], _ClassInfo) or
        isinstance(nesting_state.stack[-1], _NamespaceInfo)):
    check_params = True  # within class or namespace
  elif Match(r'.*{\s*$', line):
    if (len(nesting_state.stack) == 1 or
        isinstance(nesting_state.stack[-2], _ClassInfo) or
        isinstance(nesting_state.stack[-2], _NamespaceInfo)):
      check_params = True  # just opened global/class/namespace block
  # We allow non-const references in a few standard places, like functions
  # called "swap()" or iostream operators like "<<" or ">>". Do not check
  # those function parameters.
  #
  # We also accept & in static_assert, which looks like a function but
  # it's actually a declaration expression.
  whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|'
                           r'operator\s*[<>][<>]|'
                           r'static_assert|COMPILE_ASSERT'
                           r')\s*\(')
  if Search(whitelisted_functions, line):
    check_params = False
  elif not Search(r'\S+\([^)]*$', line):
    # Don't see a whitelisted function on this line. Actually we
    # didn't see any function name on this line, so this is likely a
    # multi-line parameter list. Try a bit harder to catch this case.
    for i in xrange(2):
      if (linenum > i and
          Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])):
        check_params = False
        break

  if check_params:
    decls = ReplaceAll(r'{[^}]*}', ' ', line)  # exclude function body
    for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
      if not Match(_RE_PATTERN_CONST_REF_PARAM, parameter):
        error(filename, linenum, 'runtime/references', 2,
              'Is this a non-const reference? '
              'If so, make const or use a pointer: ' +
              ReplaceAll(' *<', '<', parameter))


def CheckCStyleCast(filename, linenum, line, raw_line, cast_type, pattern,
                    error):
  """Checks for a C-style cast by looking for the pattern.

  Args:
    filename: The name of the current file.
    linenum: The number of the line to check.
    line: The line of code to check.
    raw_line: The raw line of code to check, with comments.
    cast_type: The string for the C++ cast to recommend.  This is either
      reinterpret_cast, static_cast, or const_cast, depending.
    pattern: The regular expression used to find C-style casts.
    error: The function to call with any errors found.

  Returns:
    True if an error was emitted.
    False otherwise.
  """
  match = Search(pattern, line)
  if not match:
    return False

  # Exclude lines with sizeof, since sizeof looks like a cast.
  sizeof_match = Match(r'.*sizeof\s*$', line[0:match.start(1) - 1])
  if sizeof_match:
    return False

  # operator++(int) and operator--(int)
  if (line[0:match.start(1) - 1].endswith(' operator++') or
      line[0:match.start(1) - 1].endswith(' operator--')):
    return False

  # A single unnamed argument for a function tends to look like an
  # old-style cast. If we see those, don't issue warnings for deprecated
  # casts; instead, issue warnings for unnamed arguments where
  # appropriate.
# # These are things that we want warnings for, since the style guide # explicitly require all parameters to be named: # Function(int); # Function(int) { # ConstMember(int) const; # ConstMember(int) const { # ExceptionMember(int) throw (...); # ExceptionMember(int) throw (...) { # PureVirtual(int) = 0; # # These are functions of some sort, where the compiler would be fine # if they had named parameters, but people often omit those # identifiers to reduce clutter: # (FunctionPointer)(int); # (FunctionPointer)(int) = value; # Function((function_pointer_arg)(int)) # <TemplateArgument(int)>; # <(FunctionPointerTemplateArgument)(int)>; remainder = line[match.end(0):] if Match(r'^\s*(?:;|const\b|throw\b|=|>|\{|\))', remainder): # Looks like an unnamed parameter. # Don't warn on any kind of template arguments. if Match(r'^\s*>', remainder): return False # Don't warn on assignments to function pointers, but keep warnings for # unnamed parameters to pure virtual functions. Note that this pattern # will also pass on assignments of "0" to function pointers, but the # preferred values for those would be "nullptr" or "NULL". matched_zero = Match(r'^\s=\s*(\S+)\s*;', remainder) if matched_zero and matched_zero.group(1) != '0': return False # Don't warn on function pointer declarations. For this we need # to check what came before the "(type)" string. if Match(r'.*\)\s*$', line[0:match.start(0)]): return False # Don't warn if the parameter is named with block comments, e.g.: # Function(int /*unused_param*/); if '/*' in raw_line: return False # Passed all filters, issue warning here. error(filename, linenum, 'readability/function', 3, 'All parameters should be named in a function') return True # At this point, all that should be left is actual casts. error(filename, linenum, 'readability/casting', 4, 'Using C-style cast. Use %s<%s>(...) instead' % (cast_type, match.group(1))) return True _HEADERS_CONTAINING_TEMPLATES = ( ('<deque>', ('deque',)), ('<functional>', ('unary_function', 'binary_function', 'plus', 'minus', 'multiplies', 'divides', 'modulus', 'negate', 'equal_to', 'not_equal_to', 'greater', 'less', 'greater_equal', 'less_equal', 'logical_and', 'logical_or', 'logical_not', 'unary_negate', 'not1', 'binary_negate', 'not2', 'bind1st', 'bind2nd', 'pointer_to_unary_function', 'pointer_to_binary_function', 'ptr_fun', 'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t', 'mem_fun_ref_t', 'const_mem_fun_t', 'const_mem_fun1_t', 'const_mem_fun_ref_t', 'const_mem_fun1_ref_t', 'mem_fun_ref', )), ('<limits>', ('numeric_limits',)), ('<list>', ('list',)), ('<map>', ('map', 'multimap',)), ('<memory>', ('allocator',)), ('<queue>', ('queue', 'priority_queue',)), ('<set>', ('set', 'multiset',)), ('<stack>', ('stack',)), ('<string>', ('char_traits', 'basic_string',)), ('<utility>', ('pair',)), ('<vector>', ('vector',)), # gcc extensions. # Note: std::hash is their hash, ::hash is our hash ('<hash_map>', ('hash_map', 'hash_multimap',)), ('<hash_set>', ('hash_set', 'hash_multiset',)), ('<slist>', ('slist',)), ) _RE_PATTERN_STRING = re.compile(r'\bstring\b') _re_pattern_algorithm_header = [] for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap', 'transform'): # Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or # type::max(). 
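  # Illustrative matches (assumed snippets): "y = max<int>(a, b)" and
  # "sort(v.begin(), v.end())" trigger the <algorithm> suggestion, while
  # "foo.max(x)" and "numeric_limits<int>::max()" do not.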
  _re_pattern_algorithm_header.append(
      (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
       _template,
       '<algorithm>'))

_re_pattern_templates = []
for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
  for _template in _templates:
    _re_pattern_templates.append(
        (re.compile(r'(\<|\b)' + _template + r'\s*\<'),
         _template + '<>',
         _header))


def FilesBelongToSameModule(filename_cc, filename_h):
  """Check if these two filenames belong to the same module.

  The concept of a 'module' here is as follows:
  foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
  same 'module' if they are in the same directory.
  some/path/public/xyzzy and some/path/internal/xyzzy are also considered
  to belong to the same module here.

  If the filename_cc contains a longer path than the filename_h, for example,
  '/absolute/path/to/base/sysinfo.cc', and this file would include
  'base/sysinfo.h', this function also produces the prefix needed to open the
  header. This is used by the caller of this function to more robustly open
  the header file. We don't have access to the real include paths in this
  context, so we need this guesswork here.

  Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
  according to this implementation. Because of this, this function gives
  some false positives. This should be sufficiently rare in practice.

  Args:
    filename_cc: is the path for the .cc file
    filename_h: is the path for the .h file

  Returns:
    Tuple with a bool and a string:
    bool: True if filename_cc and filename_h belong to the same module.
    string: the additional prefix needed to open the header file.
  """
  if not filename_cc.endswith('.cc'):
    return (False, '')
  filename_cc = filename_cc[:-len('.cc')]
  if filename_cc.endswith('_unittest'):
    filename_cc = filename_cc[:-len('_unittest')]
  elif filename_cc.endswith('_test'):
    filename_cc = filename_cc[:-len('_test')]
  filename_cc = filename_cc.replace('/public/', '/')
  filename_cc = filename_cc.replace('/internal/', '/')

  if not filename_h.endswith('.h'):
    return (False, '')
  filename_h = filename_h[:-len('.h')]
  if filename_h.endswith('-inl'):
    filename_h = filename_h[:-len('-inl')]
  filename_h = filename_h.replace('/public/', '/')
  filename_h = filename_h.replace('/internal/', '/')

  files_belong_to_same_module = filename_cc.endswith(filename_h)
  common_path = ''
  if files_belong_to_same_module:
    common_path = filename_cc[:-len(filename_h)]
  return files_belong_to_same_module, common_path


def UpdateIncludeState(filename, include_state, io=codecs):
  """Fill up the include_state with new includes found from the file.

  Args:
    filename: the name of the header to read.
    include_state: an _IncludeState instance in which the headers are
                   inserted.
    io: The io factory to use to read the file. Provided for testability.

  Returns:
    True if a header was successfully added. False otherwise.
  """
  headerfile = None
  try:
    headerfile = io.open(filename, 'r', 'utf8', 'replace')
  except IOError:
    return False
  linenum = 0
  for line in headerfile:
    linenum += 1
    clean_line = CleanseComments(line)
    match = _RE_PATTERN_INCLUDE.search(clean_line)
    if match:
      include = match.group(2)
      # The value formatting is cute, but not really used right now.
      # What matters here is that the key is in include_state.
      include_state.setdefault(include, '%s:%d' % (filename, linenum))
  return True


def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
                              io=codecs):
  """Reports missing STL includes.
This function will output warnings to make sure you are including the headers necessary for the stl containers and functions that you use. We only give one reason to include a header. For example, if you use both equal_to<> and less<> in a .h file, only one (the latter in the file) of these will be reported as a reason to include the <functional>. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. include_state: An _IncludeState instance. error: The function to call with any errors found. io: The IO factory to use to read the header file. Provided for unittest injection. """ required = {} # A map of header name to linenumber and the template entity. # Example of required: { '<functional>': (1219, 'less<>') } for linenum in xrange(clean_lines.NumLines()): line = clean_lines.elided[linenum] if not line or line[0] == '#': continue # String is special -- it is a non-templatized type in STL. matched = _RE_PATTERN_STRING.search(line) if matched: # Don't warn about strings in non-STL namespaces: # (We check only the first match per line; good enough.) prefix = line[:matched.start()] if prefix.endswith('std::') or not prefix.endswith('::'): required['<string>'] = (linenum, 'string') for pattern, template, header in _re_pattern_algorithm_header: if pattern.search(line): required[header] = (linenum, template) # The following function is just a speed up, no semantics are changed. if not '<' in line: # Reduces the cpu time usage by skipping lines. continue for pattern, template, header in _re_pattern_templates: if pattern.search(line): required[header] = (linenum, template) # The policy is that if you #include something in foo.h you don't need to # include it again in foo.cc. Here, we will look at possible includes. # Let's copy the include_state so it is only messed up within this function. include_state = include_state.copy() # Did we find the header for this file (if any) and succesfully load it? header_found = False # Use the absolute path so that matching works properly. abs_filename = FileInfo(filename).FullName() # For Emacs's flymake. # If cpplint is invoked from Emacs's flymake, a temporary file is generated # by flymake and that file name might end with '_flymake.cc'. In that case, # restore original file name here so that the corresponding header file can be # found. # e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h' # instead of 'foo_flymake.h' abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename) # include_state is modified during iteration, so we iterate over a copy of # the keys. header_keys = include_state.keys() for header in header_keys: (same_module, common_path) = FilesBelongToSameModule(abs_filename, header) fullpath = common_path + header if same_module and UpdateIncludeState(fullpath, include_state, io): header_found = True # If we can't find the header file for a .cc, assume it's because we don't # know where to look. In that case we'll give up as we're not sure they # didn't include it in the .h file. # TODO(unknown): Do a better job of finding .h files so we are confident that # not having the .h file means there isn't one. if filename.endswith('.cc') and not header_found: return # All the lines have been processed, report the errors found. 
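  # Illustrative (assumed state): required == {'<algorithm>': (42, 'min')}
  # with no '<algorithm>' recorded in include_state would report
  # 'Add #include <algorithm> for min' at line 42.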
for required_header_unstripped in required: template = required[required_header_unstripped][1] if required_header_unstripped.strip('<>"') not in include_state: error(filename, required[required_header_unstripped][0], 'build/include_what_you_use', 4, 'Add #include ' + required_header_unstripped + ' for ' + template) _RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<') def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error): """Check that make_pair's template arguments are deduced. G++ 4.6 in C++0x mode fails badly if make_pair's template arguments are specified explicitly, and such use isn't intended in any case. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line) if match: error(filename, linenum, 'build/explicit_make_pair', 4, # 4 = high confidence 'For C++11-compatibility, omit template arguments from make_pair' ' OR use pair directly OR if appropriate, construct a pair directly') def ProcessLine(filename, file_extension, clean_lines, line, include_state, function_state, nesting_state, error, extra_check_functions=[]): """Processes a single line in the file. Args: filename: Filename of the file that is being processed. file_extension: The extension (dot not included) of the file. clean_lines: An array of strings, each representing a line of the file, with comments stripped. line: Number of line being processed. include_state: An _IncludeState instance in which the headers are inserted. function_state: A _FunctionState instance which counts function lines, etc. nesting_state: A _NestingState instance which maintains information about the current stack of nested blocks being parsed. error: A callable to which errors are reported, which takes 4 arguments: filename, line number, error level, and message extra_check_functions: An array of additional check functions that will be run on each source line. Each function takes 4 arguments: filename, clean_lines, line, error """ raw_lines = clean_lines.raw_lines ParseNolintSuppressions(filename, raw_lines[line], line, error) nesting_state.Update(filename, clean_lines, line, error) if nesting_state.stack and nesting_state.stack[-1].inline_asm != _NO_ASM: return CheckForFunctionLengths(filename, clean_lines, line, function_state, error) CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error) CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error) CheckLanguage(filename, clean_lines, line, file_extension, include_state, nesting_state, error) CheckForNonConstReference(filename, clean_lines, line, nesting_state, error) CheckForNonStandardConstructs(filename, clean_lines, line, nesting_state, error) CheckVlogArguments(filename, clean_lines, line, error) CheckCaffeAlternatives(filename, clean_lines, line, error) CheckCaffeDataLayerSetUp(filename, clean_lines, line, error) CheckCaffeRandom(filename, clean_lines, line, error) CheckPosixThreading(filename, clean_lines, line, error) CheckInvalidIncrement(filename, clean_lines, line, error) CheckMakePairUsesDeduction(filename, clean_lines, line, error) for check_fn in extra_check_functions: check_fn(filename, clean_lines, line, error) def ProcessFileData(filename, file_extension, lines, error, extra_check_functions=[]): """Performs lint checks and reports any errors to the given error function. 
Args: filename: Filename of the file that is being processed. file_extension: The extension (dot not included) of the file. lines: An array of strings, each representing a line of the file, with the last element being empty if the file is terminated with a newline. error: A callable to which errors are reported, which takes 4 arguments: filename, line number, error level, and message extra_check_functions: An array of additional check functions that will be run on each source line. Each function takes 4 arguments: filename, clean_lines, line, error """ lines = (['// marker so line numbers and indices both start at 1'] + lines + ['// marker so line numbers end in a known way']) include_state = _IncludeState() function_state = _FunctionState() nesting_state = _NestingState() ResetNolintSuppressions() CheckForCopyright(filename, lines, error) if file_extension == 'h': CheckForHeaderGuard(filename, lines, error) RemoveMultiLineComments(filename, lines, error) clean_lines = CleansedLines(lines) for line in xrange(clean_lines.NumLines()): ProcessLine(filename, file_extension, clean_lines, line, include_state, function_state, nesting_state, error, extra_check_functions) nesting_state.CheckCompletedBlocks(filename, error) CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error) # We check here rather than inside ProcessLine so that we see raw # lines rather than "cleaned" lines. CheckForBadCharacters(filename, lines, error) CheckForNewlineAtEOF(filename, lines, error) def ProcessFile(filename, vlevel, extra_check_functions=[]): """Does google-lint on a single file. Args: filename: The name of the file to parse. vlevel: The level of errors to report. Every error of confidence >= verbose_level will be reported. 0 is a good default. extra_check_functions: An array of additional check functions that will be run on each source line. Each function takes 4 arguments: filename, clean_lines, line, error """ _SetVerboseLevel(vlevel) try: # Support the UNIX convention of using "-" for stdin. Note that # we are not opening the file with universal newline support # (which codecs doesn't support anyway), so the resulting lines do # contain trailing '\r' characters if we are reading a file that # has CRLF endings. # If after the split a trailing '\r' is present, it is removed # below. If it is not expected to be present (i.e. os.linesep != # '\r\n' as in Windows), a warning is issued below if this file # is processed. if filename == '-': lines = codecs.StreamReaderWriter(sys.stdin, codecs.getreader('utf8'), codecs.getwriter('utf8'), 'replace').read().split('\n') else: lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n') carriage_return_found = False # Remove trailing '\r'. for linenum in range(len(lines)): if lines[linenum].endswith('\r'): lines[linenum] = lines[linenum].rstrip('\r') carriage_return_found = True except IOError: sys.stderr.write( "Skipping input '%s': Can't open for reading\n" % filename) return # Note, if no dot is found, this will give the entire filename as the ext. file_extension = filename[filename.rfind('.') + 1:] # When reading from stdin, the extension is unknown, so no cpplint tests # should rely on the extension. 
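  # e.g. (illustrative) 'foo.cc' -> 'cc' and 'foo.tar.gz' -> 'gz', while a
  # dotless name such as 'Makefile' yields the entire filename as the
  # extension, since rfind('.') returns -1.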
  if filename != '-' and file_extension not in _valid_extensions:
    sys.stderr.write('Ignoring %s; not a valid file name '
                     '(%s)\n' % (filename, ', '.join(_valid_extensions)))
  else:
    ProcessFileData(filename, file_extension, lines, Error,
                    extra_check_functions)
    if carriage_return_found and os.linesep != '\r\n':
      # Use 0 for linenum since outputting only one error for potentially
      # several lines.
      Error(filename, 0, 'whitespace/newline', 1,
            'One or more unexpected \\r (^M) found; '
            'better to use only a \\n')

  sys.stderr.write('Done processing %s\n' % filename)


def PrintUsage(message):
  """Prints a brief usage string and exits, optionally with an error message.

  Args:
    message: The optional error message.
  """
  sys.stderr.write(_USAGE)
  if message:
    sys.exit('\nFATAL ERROR: ' + message)
  else:
    sys.exit(1)


def PrintCategories():
  """Prints a list of all the error-categories used by error messages.

  These are the categories used to filter messages via --filter.
  """
  sys.stderr.write(''.join('  %s\n' % cat for cat in _ERROR_CATEGORIES))
  sys.exit(0)


def ParseArguments(args):
  """Parses the command line arguments.

  This may set the output format and verbosity level as side-effects.

  Args:
    args: The command line arguments.

  Returns:
    The list of filenames to lint.
  """
  try:
    (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
                                                 'counting=',
                                                 'filter=',
                                                 'root=',
                                                 'linelength=',
                                                 'extensions='])
  except getopt.GetoptError:
    PrintUsage('Invalid arguments.')

  verbosity = _VerboseLevel()
  output_format = _OutputFormat()
  filters = ''
  counting_style = ''

  for (opt, val) in opts:
    if opt == '--help':
      PrintUsage(None)
    elif opt == '--output':
      if val not in ('emacs', 'vs7', 'eclipse'):
        PrintUsage('The only allowed output formats are emacs, vs7 and '
                   'eclipse.')
      output_format = val
    elif opt == '--verbose':
      verbosity = int(val)
    elif opt == '--filter':
      filters = val
      if not filters:
        PrintCategories()
    elif opt == '--counting':
      if val not in ('total', 'toplevel', 'detailed'):
        PrintUsage('Valid counting options are total, toplevel, and detailed')
      counting_style = val
    elif opt == '--root':
      global _root
      _root = val
    elif opt == '--linelength':
      global _line_length
      try:
        _line_length = int(val)
      except ValueError:
        PrintUsage('Line length must be numeric.')
    elif opt == '--extensions':
      global _valid_extensions
      try:
        _valid_extensions = set(val.split(','))
      except ValueError:
        PrintUsage('Extensions must be a comma-separated list.')
  if not filenames:
    PrintUsage('No files were specified.')

  _SetOutputFormat(output_format)
  _SetVerboseLevel(verbosity)
  _SetFilters(filters)
  _SetCountingStyle(counting_style)

  return filenames


def main():
  filenames = ParseArguments(sys.argv[1:])

  # Change stderr to write with replacement characters so we don't die
  # if we try to print something containing non-ASCII characters.
  sys.stderr = codecs.StreamReaderWriter(sys.stderr,
                                         codecs.getreader('utf8'),
                                         codecs.getwriter('utf8'),
                                         'replace')

  _cpplint_state.ResetErrorCounts()
  for filename in filenames:
    ProcessFile(filename, _cpplint_state.verbose_level)
  _cpplint_state.PrintErrorCounts()

  sys.exit(_cpplint_state.error_count > 0)


if __name__ == '__main__':
  main()
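# Example invocation (illustrative; the flags shown are the ones handled by
# ParseArguments above):
#
#   python cpplint.py --counting=detailed --linelength=100 \
#       --filter=-whitespace/tab,-build/include_alpha src/foo.cc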
page.go
// Copyright 2018 The Hugo Authors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package hugolib import ( "bytes" "context" "errors" "fmt" "reflect" "github.com/gohugoio/hugo/common/maps" "github.com/gohugoio/hugo/media" _errors "github.com/pkg/errors" "github.com/gohugoio/hugo/langs" "github.com/gohugoio/hugo/related" "github.com/bep/gitmap" "github.com/gohugoio/hugo/helpers" "github.com/gohugoio/hugo/hugolib/pagemeta" "github.com/gohugoio/hugo/resource" "github.com/gohugoio/hugo/output" "github.com/mitchellh/mapstructure" "html/template" "io" "path" "path/filepath" "regexp" "runtime" "strings" "sync" "time" "unicode/utf8" "github.com/gohugoio/hugo/compare" "github.com/gohugoio/hugo/source" "github.com/spf13/cast" ) var ( cjk = regexp.MustCompile(`\p{Han}|\p{Hangul}|\p{Hiragana}|\p{Katakana}`) // This is all the kinds we can expect to find in .Site.Pages. allKindsInPages = []string{KindPage, KindHome, KindSection, KindTaxonomy, KindTaxonomyTerm} allKinds = append(allKindsInPages, []string{kindRSS, kindSitemap, kindRobotsTXT, kind404}...) // Assert that it implements the Eqer interface. _ compare.Eqer = (*Page)(nil) _ compare.Eqer = (*PageOutput)(nil) // Assert that it implements the interface needed for related searches. _ related.Document = (*Page)(nil) ) const ( KindPage = "page" // The rest are node types; home page, sections etc. KindHome = "home" KindSection = "section" KindTaxonomy = "taxonomy" KindTaxonomyTerm = "taxonomyTerm" // Temporary state. kindUnknown = "unknown" // The following are (currently) temporary nodes, // i.e. nodes we create just to render in isolation. kindRSS = "RSS" kindSitemap = "sitemap" kindRobotsTXT = "robotsTXT" kind404 = "404" pageResourceType = "page" ) type Page struct { *pageInit *pageContentInit // Kind is the discriminator that identifies the different page types // in the different page collections. This can, as an example, be used // to to filter regular pages, find sections etc. // Kind will, for the pages available to the templates, be one of: // page, home, section, taxonomy and taxonomyTerm. // It is of string type to make it easy to reason about in // the templates. Kind string // Since Hugo 0.18 we got rid of the Node type. So now all pages are ... // pages (regular pages, home page, sections etc.). // Sections etc. will have child pages. These were earlier placed in .Data.Pages, // but can now be more intuitively also be fetched directly from .Pages. // This collection will be nil for regular pages. Pages Pages // Since Hugo 0.32, a Page can have resources such as images and CSS associated // with itself. The resource will typically be placed relative to the Page, // but templates should use the links (Permalink and RelPermalink) // provided by the Resource object. Resources resource.Resources // This is the raw front matter metadata that is going to be assigned to // the Resources above. resourcesMetadata []map[string]interface{} // translations will contain references to this page in other language // if available. 
translations Pages // A key that maps to translation(s) of this page. This value is fetched // from the page front matter. translationKey string // Params contains configuration defined in the params section of page frontmatter. params map[string]interface{} // Content sections contentv template.HTML summary template.HTML TableOfContents template.HTML // Passed to the shortcodes pageWithoutContent *PageWithoutContent Aliases []string Images []Image Videos []Video truncated bool Draft bool Status string // PageMeta contains page stats such as word count etc. PageMeta // Markup contains the markup type for the content. Markup string extension string contentType string Layout string // For npn-renderable pages (see IsRenderable), the content itself // is used as template and the template name is stored here. selfLayout string linkTitle string // Content items. pageContent // whether the content is in a CJK language. isCJKLanguage bool // the content stripped for HTML plain string // TODO should be []byte plainWords []string // rendering configuration renderingConfig *helpers.BlackFriday // menus pageMenus PageMenus source.File Position `json:"-"` GitInfo *gitmap.GitInfo // This was added as part of getting the Nodes (taxonomies etc.) to work as // Pages in Hugo 0.18. // It is deliberately named similar to Section, but not exported (for now). // We currently have only one level of section in Hugo, but the page can live // any number of levels down the file path. // To support taxonomies like /categories/hugo etc. we will need to keep track // of that information in a general way. // So, sections represents the path to the content, i.e. a content file or a // virtual content file in the situations where a taxonomy or a section etc. // isn't accomanied by one. sections []string // Will only be set for sections and regular pages. parent *Page // When we create paginator pages, we create a copy of the original, // but keep track of it here. origOnCopy *Page // Will only be set for section pages and the home page. subSections Pages s *Site // Pulled over from old Node. TODO(bep) reorg and group (embed) Site *SiteInfo `json:"-"` title string Description string Keywords []string data map[string]interface{} pagemeta.PageDates Sitemap Sitemap pagemeta.URLPath frontMatterURL string permalink string relPermalink string // relative target path without extension and any base path element // from the baseURL or the language code. // This is used to construct paths in the page resources. relTargetPathBase string // Is set to a forward slashed path if this is a Page resources living in a folder below its owner. resourcePath string // This is enabled if it is a leaf bundle (the "index.md" type) and it is marked as headless in front matter. // Being headless means that // 1. The page itself is not rendered to disk // 2. It is not available in .Site.Pages etc. // 3. But you can get it via .Site.GetPage headless bool layoutDescriptor output.LayoutDescriptor scratch *maps.Scratch // It would be tempting to use the language set on the Site, but in they way we do // multi-site processing, these values may differ during the initial page processing. language *langs.Language lang string // When in Fast Render Mode, we only render a sub set of the pages, i.e. the // pages the user is working on. There are, however, situations where we need to // signal other pages to be rendered. forceRender bool // The output formats this page will be rendered to. 
	outputFormats output.Formats

	// This is the PageOutput that represents the first item in outputFormats.
	// Use with care, as there is potential for infinite loops.
	mainPageOutput *PageOutput

	targetPathDescriptorPrototype *targetPathDescriptor
}

func stackTrace(length int) string {
	trace := make([]byte, length)
	runtime.Stack(trace, true)
	return string(trace)
}

func (p *Page) Data() interface{} {
	return p.data
}

func (p *Page) initContent() {

	p.contentInit.Do(func() {
		// This careful dance is here to protect against circular loops in shortcode/content
		// constructs.
		// TODO(bep) context vs the remote shortcodes
		ctx, cancel := context.WithTimeout(context.Background(), p.s.Timeout)
		defer cancel()
		c := make(chan error, 1)

		go func() {
			var err error
			p.contentInitMu.Lock()
			defer p.contentInitMu.Unlock()

			err = p.prepareForRender()
			if err != nil {
				c <- err
				return
			}

			if len(p.summary) == 0 {
				if err = p.setAutoSummary(); err != nil {
					err = p.errorf(err, "failed to set auto summary")
				}
			}
			c <- err
		}()

		select {
		case <-ctx.Done():
			p.s.Log.WARN.Printf("WARNING: Timed out creating content for page %q (.Content will be empty). This is most likely a circular shortcode content loop that should be fixed. If this is just a shortcode calling a slow remote service, try to set \"timeout=20000\" (or higher, value is in milliseconds) in config.toml.\n", p.pathOrTitle())
		case err := <-c:
			if err != nil {
				p.s.SendError(err)
			}
		}
	})

}

// This is sent to the shortcodes for this page. Not doing that will create an infinite regress. So,
// shortcodes can access .Page.TableOfContents, but not .Page.Content etc.
func (p *Page) withoutContent() *PageWithoutContent {
	p.pageInit.withoutContentInit.Do(func() {
		p.pageWithoutContent = &PageWithoutContent{Page: p}
	})
	return p.pageWithoutContent
}

func (p *Page) Content() (interface{}, error) {
	return p.content(), nil
}

func (p *Page) Truncated() bool {
	p.initContent()
	return p.truncated
}

func (p *Page) content() template.HTML {
	p.initContent()
	return p.contentv
}

func (p *Page) Summary() template.HTML {
	p.initContent()
	return p.summary
}

// Sites is a convenience method to get all the Hugo sites/languages configured.
func (p *Page) Sites() SiteInfos {
	infos := make(SiteInfos, len(p.s.owner.Sites))
	for i, site := range p.s.owner.Sites {
		infos[i] = &site.Info
	}
	return infos
}

// SearchKeywords implements the related.Document interface needed for fast page searches.
func (p *Page) SearchKeywords(cfg related.IndexConfig) ([]related.Keyword, error) {

	v, err := p.Param(cfg.Name)
	if err != nil {
		return nil, err
	}

	return cfg.ToKeywords(v)
}

// PubDate is when this page was or will be published.
// NOTE: This is currently used for search only and is not meant to be used
// directly in templates. We need to consolidate the dates in this struct.
// TODO(bep) see https://github.com/gohugoio/hugo/issues/3854
func (p *Page) PubDate() time.Time {
	if !p.PublishDate.IsZero() {
		return p.PublishDate
	}
	return p.Date
}

func (*Page) ResourceType() string {
	return pageResourceType
}

func (p *Page) RSSLink() template.URL {
	f, found := p.outputFormats.GetByName(output.RSSFormat.Name)
	if !found {
		return ""
	}
	return template.URL(newOutputFormat(p, f).Permalink())
}

func (p *Page) createLayoutDescriptor() output.LayoutDescriptor {
	var section string

	switch p.Kind {
	case KindSection:
		// In Hugo 0.22 we introduced nested sections, but we still only
		// use the first level to pick the correct template. This may change in
		// the future.
section = p.sections[0] case KindTaxonomy, KindTaxonomyTerm: section = p.s.taxonomiesPluralSingular[p.sections[0]] default: } return output.LayoutDescriptor{ Kind: p.Kind, Type: p.Type(), Lang: p.Lang(), Layout: p.Layout, Section: section, } } // pageInit lazy initializes different parts of the page. It is extracted // into its own type so we can easily create a copy of a given page. type pageInit struct { languageInit sync.Once pageMenusInit sync.Once pageMetaInit sync.Once renderingConfigInit sync.Once withoutContentInit sync.Once } type pageContentInit struct { contentInitMu sync.Mutex contentInit sync.Once plainInit sync.Once plainWordsInit sync.Once } func (p *Page) resetContent() { p.pageContentInit = &pageContentInit{} } // IsNode returns whether this is an item of one of the list types in Hugo, // i.e. not a regular content page. func (p *Page) IsNode() bool { return p.Kind != KindPage } // IsHome returns whether this is the home page. func (p *Page) IsHome() bool { return p.Kind == KindHome } // IsSection returns whether this is a section page. func (p *Page) IsSection() bool { return p.Kind == KindSection } // IsPage returns whether this is a regular content page. func (p *Page) IsPage() bool { return p.Kind == KindPage } // BundleType returns the bundle type: "leaf", "branch" or an empty string if it is none. // See https://gohugo.io/content-management/page-bundles/ func (p *Page) BundleType() string { if p.IsNode() { return "branch" } var source interface{} = p.File if fi, ok := source.(*fileInfo); ok { switch fi.bundleTp { case bundleBranch: return "branch" case bundleLeaf: return "leaf" } } return "" } func (p *Page) MediaType() media.Type { return media.OctetType } type PageMeta struct { wordCount int fuzzyWordCount int readingTime int Weight int } type Position struct { PrevPage *Page NextPage *Page PrevInSection *Page NextInSection *Page } type Pages []*Page func (ps Pages) String() string { return fmt.Sprintf("Pages(%d)", len(ps)) } func (ps Pages) findPagePosByFilename(filename string) int { for i, x := range ps { if x.Filename() == filename { return i } } return -1 } func (ps Pages) removeFirstIfFound(p *Page) Pages { ii := -1 for i, pp := range ps { if pp == p { ii = i break } } if ii != -1 { ps = append(ps[:ii], ps[ii+1:]...) } return ps } func (ps Pages) findPagePosByFilnamePrefix(prefix string) int { if prefix == "" { return -1 } lenDiff := -1 currPos := -1 prefixLen := len(prefix) // Find the closest match for i, x := range ps { if strings.HasPrefix(x.Filename(), prefix) { diff := len(x.Filename()) - prefixLen if lenDiff == -1 || diff < lenDiff { lenDiff = diff currPos = i } } } return currPos } // findPagePos Given a page, it will find the position in Pages // will return -1 if not found func (ps Pages) findPagePos(page *Page) int { for i, x := range ps { if x.Filename() == page.Filename() { return i } } return -1 } func (p *Page) Plain() string { p.initContent() p.initPlain(true) return p.plain } func (p *Page) initPlain(lock bool) { p.plainInit.Do(func() { if lock { p.contentInitMu.Lock() defer p.contentInitMu.Unlock() } p.plain = helpers.StripHTML(string(p.contentv)) }) } func (p *Page) PlainWords() []string { p.initContent() p.initPlainWords(true) return p.plainWords } func (p *Page) initPlainWords(lock bool) { p.plainWordsInit.Do(func() { if lock { p.contentInitMu.Lock() defer p.contentInitMu.Unlock() } p.plainWords = strings.Fields(p.plain) }) } // Param is a convenience method to do lookups in Page's and Site's Params map, // in that order. 
// // This method is also implemented on Node and SiteInfo. func (p *Page) Param(key interface{}) (interface{}, error) { keyStr, err := cast.ToStringE(key) if err != nil { return nil, err } keyStr = strings.ToLower(keyStr) result, _ := p.traverseDirect(keyStr) if result != nil { return result, nil } keySegments := strings.Split(keyStr, ".") if len(keySegments) == 1 { return nil, nil } return p.traverseNested(keySegments) } func (p *Page) traverseDirect(key string) (interface{}, error) { keyStr := strings.ToLower(key) if val, ok := p.params[keyStr]; ok { return val, nil } return p.Site.Params[keyStr], nil } func (p *Page) traverseNested(keySegments []string) (interface{}, error) { result := traverse(keySegments, p.params) if result != nil { return result, nil } result = traverse(keySegments, p.Site.Params) if result != nil { return result, nil } // Didn't find anything, but also no problems. return nil, nil } func traverse(keys []string, m map[string]interface{}) interface{} { // Shift first element off. firstKey, rest := keys[0], keys[1:] result := m[firstKey] // No point in continuing here. if result == nil { return result } if len(rest) == 0 { // That was the last key. return result } // That was not the last key. return traverse(rest, cast.ToStringMap(result)) } func (p *Page) Author() Author { authors := p.Authors() for _, author := range authors { return author } return Author{} } func (p *Page) Authors() AuthorList { authorKeys, ok := p.params["authors"] if !ok { return AuthorList{} } authors := authorKeys.([]string) if len(authors) < 1 || len(p.Site.Authors) < 1 { return AuthorList{} } al := make(AuthorList) for _, author := range authors { a, ok := p.Site.Authors[author] if ok { al[author] = a } } return al } func (p *Page) UniqueID() string { return p.File.UniqueID() } // Returns the page as summary and main. func (p *Page) setUserDefinedSummary(rawContentCopy []byte) (*summaryContent, error) { sc, err := splitUserDefinedSummaryAndContent(p.Markup, rawContentCopy) if err != nil { return nil, err } if sc == nil { // No divider found return nil, nil } p.summary = helpers.BytesToHTML(sc.summary) return sc, nil } // Make this explicit so there is no doubt about what is what. 
type summaryContent struct { summary []byte content []byte } func splitUserDefinedSummaryAndContent(markup string, c []byte) (sc *summaryContent, err error) { defer func() { if r := recover(); r != nil { err = fmt.Errorf("summary split failed: %s", r) } }() c = bytes.TrimSpace(c) startDivider := bytes.Index(c, internalSummaryDivider) if startDivider == -1 { return } endDivider := startDivider + len(internalSummaryDivider) endSummary := startDivider var ( startMarkup []byte endMarkup []byte addDiv bool ) switch markup { default: startMarkup = []byte("<p>") endMarkup = []byte("</p>") case "asciidoc": startMarkup = []byte("<div class=\"paragraph\">") endMarkup = []byte("</div>") case "rst": startMarkup = []byte("<p>") endMarkup = []byte("</p>") addDiv = true } // Find the closest end/start markup string to the divider fromStart := -1 fromIdx := bytes.LastIndex(c[:startDivider], startMarkup) if fromIdx != -1 { fromStart = startDivider - fromIdx - len(startMarkup) } fromEnd := bytes.Index(c[endDivider:], endMarkup) if fromEnd != -1 && fromEnd <= fromStart { endSummary = startDivider + fromEnd + len(endMarkup) } else if fromStart != -1 && fromEnd != -1 { endSummary = startDivider - fromStart - len(startMarkup) } withoutDivider := bytes.TrimSpace(append(c[:startDivider], c[endDivider:]...)) var ( summary []byte ) if len(withoutDivider) > 0 { summary = bytes.TrimSpace(withoutDivider[:endSummary]) } if addDiv { // For the rst summary = append(append([]byte(nil), summary...), []byte("</div>")...) } if err != nil { return } sc = &summaryContent{ summary: summary, content: withoutDivider, } return } func (p *Page) setAutoSummary() error { var summary string var truncated bool // This careful init dance could probably be refined, but it is purely for performance // reasons. These "plain" methods are expensive if the plain content is never actually // used. p.initPlain(false) if p.isCJKLanguage { p.initPlainWords(false) summary, truncated = p.s.ContentSpec.TruncateWordsByRune(p.plainWords) } else { summary, truncated = p.s.ContentSpec.TruncateWordsToWholeSentence(p.plain) } p.summary = template.HTML(summary) p.truncated = truncated return nil } func (p *Page) renderContent(content []byte) []byte { return p.s.ContentSpec.RenderBytes(&helpers.RenderingContext{ Content: content, RenderTOC: true, PageFmt: p.Markup, Cfg: p.Language(), DocumentID: p.UniqueID(), DocumentName: p.Path(), Config: p.getRenderingConfig()}) } func (p *Page) getRenderingConfig() *helpers.BlackFriday { p.renderingConfigInit.Do(func() { bfParam := p.getParamToLower("blackfriday") if bfParam == nil { p.renderingConfig = p.s.ContentSpec.BlackFriday return } // Create a copy so we can modify it. 
		bf := *p.s.ContentSpec.BlackFriday
		p.renderingConfig = &bf

		if p.Language() == nil {
			panic(fmt.Sprintf("nil language for %s with source lang %s", p.BaseFileName(), p.lang))
		}

		pageParam := cast.ToStringMap(bfParam)
		if err := mapstructure.Decode(pageParam, &p.renderingConfig); err != nil {
			p.s.Log.FATAL.Printf("Failed to get rendering config for %s:\n%s", p.BaseFileName(), err.Error())
		}

	})

	return p.renderingConfig
}

func (s *Site) newPage(filename string) *Page {
	fi := newFileInfo(
		s.SourceSpec,
		s.absContentDir(),
		filename,
		nil,
		bundleNot,
	)
	return s.newPageFromFile(fi)
}

func (s *Site) newPageFromFile(fi *fileInfo) *Page {
	return &Page{
		pageInit:        &pageInit{},
		pageContentInit: &pageContentInit{},
		Kind:            kindFromFileInfo(fi),
		contentType:     "",
		File:            fi,
		Keywords:        []string{},
		Sitemap:         Sitemap{Priority: -1},
		params:          make(map[string]interface{}),
		translations:    make(Pages, 0),
		sections:        sectionsFromFile(fi),
		Site:            &s.Info,
		s:               s,
	}
}

func (p *Page) IsRenderable() bool {
	return p.renderable
}

func (p *Page) Type() string {
	if p.contentType != "" {
		return p.contentType
	}

	if x := p.Section(); x != "" {
		return x
	}

	return "page"
}

// Section returns the first path element below the content root. Note that
// since Hugo 0.22 we support nested sections, but this will always be the first
// element of any nested path.
func (p *Page) Section() string {
	if p.Kind == KindSection || p.Kind == KindTaxonomy || p.Kind == KindTaxonomyTerm {
		return p.sections[0]
	}
	return p.File.Section()
}

func (s *Site) newPageFrom(buf io.Reader, name string) (*Page, error) {
	p, err := s.NewPage(name)
	if err != nil {
		return p, err
	}
	_, err = p.ReadFrom(buf)
	if err != nil {
		return nil, err
	}

	return p, err
}

func (s *Site) NewPage(name string) (*Page, error) {
	if len(name) == 0 {
		return nil, errors.New("Zero length page name")
	}

	// Create new page
	p := s.newPage(name)
	p.s = s
	p.Site = &s.Info

	return p, nil
}

func (p *Page) ReadFrom(buf io.Reader) (int64, error) {
	// Parse for metadata & body
	if err := p.parse(buf); err != nil {
		return 0, p.errWithFileContext(err)
	}

	if err := p.mapContent(); err != nil {
		return 0, p.errWithFileContext(err)
	}

	return int64(len(p.source.parsed.Input())), nil
}

func (p *Page) WordCount() int {
	p.initContentPlainAndMeta()
	return p.wordCount
}

func (p *Page) ReadingTime() int {
	p.initContentPlainAndMeta()
	return p.readingTime
}

func (p *Page) FuzzyWordCount() int {
	p.initContentPlainAndMeta()
	return p.fuzzyWordCount
}

func (p *Page) initContentPlainAndMeta() {
	p.initContent()
	p.initPlain(true)
	p.initPlainWords(true)
	p.initMeta()
}

func (p *Page) initContentAndMeta() {
	p.initContent()
	p.initMeta()
}

func (p *Page) initMeta() {
	p.pageMetaInit.Do(func() {
		if p.isCJKLanguage {
			p.wordCount = 0
			for _, word := range p.plainWords {
				runeCount := utf8.RuneCountInString(word)
				if len(word) == runeCount {
					p.wordCount++
				} else {
					p.wordCount += runeCount
				}
			}
		} else {
			p.wordCount = helpers.TotalWords(p.plain)
		}

		// TODO(bep) is set in a test. Fix that.
		if p.fuzzyWordCount == 0 {
			p.fuzzyWordCount = (p.wordCount + 100) / 100 * 100
		}

		if p.isCJKLanguage {
			p.readingTime = (p.wordCount + 500) / 501
		} else {
			p.readingTime = (p.wordCount + 212) / 213
		}
	})
}

// HasShortcode returns whether the page has a shortcode with the given name.
// This method is mainly motivated by the Hugo Docs site's need for a list
// of pages with the `todo` shortcode in it.
func (p *Page) HasShortcode(name string) bool { if p.shortcodeState == nil { return false } return p.shortcodeState.nameSet[name] } // AllTranslations returns all translations, including the current Page. func (p *Page) AllTranslations() Pages { return p.translations } // IsTranslated returns whether this content file is translated to // other language(s). func (p *Page) IsTranslated() bool { return len(p.translations) > 1 } // Translations returns the translations excluding the current Page. func (p *Page) Translations() Pages { translations := make(Pages, 0) for _, t := range p.translations { if t.Lang() != p.Lang() { translations = append(translations, t) } } return translations } // TranslationKey returns the key used to map language translations of this page. // It will use the translationKey set in front matter if set, or the content path and // filename (excluding any language code and extension), e.g. "about/index". // The Page Kind is always prepended. func (p *Page) TranslationKey() string { if p.translationKey != "" { return p.Kind + "/" + p.translationKey } if p.IsNode() { return path.Join(p.Kind, path.Join(p.sections...), p.TranslationBaseName()) } return path.Join(p.Kind, filepath.ToSlash(p.Dir()), p.TranslationBaseName()) } func (p *Page) LinkTitle() string { if len(p.linkTitle) > 0 { return p.linkTitle } return p.title } func (p *Page) shouldBuild() bool { return shouldBuild(p.s.BuildFuture, p.s.BuildExpired, p.s.BuildDrafts, p.Draft, p.PublishDate, p.ExpiryDate) } func shouldBuild(buildFuture bool, buildExpired bool, buildDrafts bool, Draft bool, publishDate time.Time, expiryDate time.Time) bool { if !(buildDrafts || !Draft) { return false } if !buildFuture && !publishDate.IsZero() && publishDate.After(time.Now()) { return false } if !buildExpired && !expiryDate.IsZero() && expiryDate.Before(time.Now()) { return false } return true } func (p *Page) IsDraft() bool { return p.Draft } func (p *Page) IsFuture() bool { if p.PublishDate.IsZero() { return false } return p.PublishDate.After(time.Now()) } func (p *Page) IsExpired() bool { if p.ExpiryDate.IsZero() { return false } return p.ExpiryDate.Before(time.Now()) } func (p *Page) URL() string { if p.IsPage() && p.URLPath.URL != "" { // This is the url set in front matter return p.URLPath.URL } // Fall back to the relative permalink. u := p.RelPermalink() return u } // Permalink returns the absolute URL to this Page. func (p *Page) Permalink() string { if p.headless { return "" } return p.permalink } // RelPermalink gets a URL to the resource relative to the host. func (p *Page) RelPermalink() string { if p.headless { return "" } return p.relPermalink } // See resource.Resource // This value is used, by default, in Resources.ByPrefix etc. func (p *Page) Name() string { if p.resourcePath != "" { return p.resourcePath } return p.title } func (p *Page) Title() string { return p.title } func (p *Page) Params() map[string]interface{} { return p.params } func (p *Page) subResourceTargetPathFactory(base string) string { return path.Join(p.relTargetPathBase, base) } func (p *Page) initMainOutputFormat() error { if p.mainPageOutput != nil { return nil } outFormat := p.outputFormats[0] pageOutput, err := newPageOutput(p, false, false, outFormat) if err != nil { return p.errorf(err, "failed to create output page for type %q", outFormat.Name) } p.mainPageOutput = pageOutput return nil } func (p *Page) setContentInit(start bool) error { if start
	updated := true
	if p.shortcodeState != nil {
		updated = p.shortcodeState.updateDelta()
	}

	if updated {
		p.resetContent()
	}

	for _, r := range p.Resources.ByType(pageResourceType) {
		p.s.PathSpec.ProcessingStats.Incr(&p.s.PathSpec.ProcessingStats.Pages)
		bp := r.(*Page)
		if start {
			bp.shortcodeState.clearDelta()
		}
		if bp.shortcodeState != nil {
			updated = bp.shortcodeState.updateDelta()
		}
		if updated {
			bp.resetContent()
		}
	}

	return nil

}

func (p *Page) prepareForRender() error {
	s := p.s

	// If we got this far it means that this is either a new Page pointer
	// or a template or similar has changed, so we need to do a re-rendering
	// of the shortcodes etc.

	// If in watch mode or if we have multiple sites or output formats,
	// we need to keep the original so we can
	// potentially repeat this process on rebuild.
	needsACopy := s.running() || len(s.owner.Sites) > 1 || len(p.outputFormats) > 1
	var workContentCopy []byte
	if needsACopy {
		workContentCopy = make([]byte, len(p.workContent))
		copy(workContentCopy, p.workContent)
	} else {
		// Just reuse the same slice.
		workContentCopy = p.workContent
	}

	var err error
	// Note: The shortcodes in a page cannot access the page content it lives in,
	// hence the withoutContent().
	if workContentCopy, err = handleShortcodes(p.withoutContent(), workContentCopy); err != nil {
		return err
	}

	if p.Markup != "html" && p.source.hasSummaryDivider {

		// Now we know enough to create a summary of the page and count some words
		summaryContent, err := p.setUserDefinedSummary(workContentCopy)

		if err != nil {
			s.Log.ERROR.Printf("Failed to set user defined summary for page %q: %s", p.Path(), err)
		} else if summaryContent != nil {
			workContentCopy = summaryContent.content
		}

		p.contentv = helpers.BytesToHTML(workContentCopy)

	} else {
		p.contentv = helpers.BytesToHTML(workContentCopy)
	}

	return nil
}

func (p *Page) updateMetaData(frontmatter map[string]interface{}) error {
	if frontmatter == nil {
		return errors.New("missing frontmatter data")
	}
	// Needed for case insensitive fetching of params values
	maps.ToLower(frontmatter)

	var mtime time.Time
	if p.FileInfo() != nil {
		mtime = p.FileInfo().ModTime()
	}

	var gitAuthorDate time.Time
	if p.GitInfo != nil {
		gitAuthorDate = p.GitInfo.AuthorDate
	}

	descriptor := &pagemeta.FrontMatterDescriptor{
		Frontmatter:   frontmatter,
		Params:        p.params,
		Dates:         &p.PageDates,
		PageURLs:      &p.URLPath,
		BaseFilename:  p.ContentBaseName(),
		ModTime:       mtime,
		GitAuthorDate: gitAuthorDate,
	}

	// Handle the date separately
	// TODO(bep) we need to "do more" in this area so this can be split up and
	// more easily tested without the Page, but the coupling is strong.
	err := p.s.frontmatterHandler.HandleDates(descriptor)
	if err != nil {
		p.s.Log.ERROR.Printf("Failed to handle dates for page %q: %s", p.Path(), err)
	}

	var draft, published, isCJKLanguage *bool
	for k, v := range frontmatter {
		loki := strings.ToLower(k)

		if loki == "published" { // Intentionally undocumented
			vv, err := cast.ToBoolE(v)
			if err == nil {
				published = &vv
			}
			// published may also be a date
			continue
		}

		if p.s.frontmatterHandler.IsDateKey(loki) {
			continue
		}

		switch loki {
		case "title":
			p.title = cast.ToString(v)
			p.params[loki] = p.title
		case "linktitle":
			p.linkTitle = cast.ToString(v)
			p.params[loki] = p.linkTitle
		case "description":
			p.Description = cast.ToString(v)
			p.params[loki] = p.Description
		case "slug":
			p.Slug = cast.ToString(v)
			p.params[loki] = p.Slug
		case "url":
			if url := cast.ToString(v); strings.HasPrefix(url, "http://") || strings.HasPrefix(url, "https://") {
				return fmt.Errorf("Only relative URLs are supported, %v provided", url)
			}
			p.URLPath.URL = cast.ToString(v)
			p.frontMatterURL = p.URLPath.URL
			p.params[loki] = p.URLPath.URL
		case "type":
			p.contentType = cast.ToString(v)
			p.params[loki] = p.contentType
		case "extension", "ext":
			p.extension = cast.ToString(v)
			p.params[loki] = p.extension
		case "keywords":
			p.Keywords = cast.ToStringSlice(v)
			p.params[loki] = p.Keywords
		case "headless":
			// For now, only the leaf bundles ("index.md") can be headless (i.e. produce no output).
			// We may expand on this in the future, but that gets more complex pretty fast.
			if p.TranslationBaseName() == "index" {
				p.headless = cast.ToBool(v)
			}
			p.params[loki] = p.headless
		case "outputs":
			o := cast.ToStringSlice(v)
			if len(o) > 0 {
				// Output formats are explicitly set in front matter, use those.
				outFormats, err := p.s.outputFormatsConfig.GetByNames(o...)

				if err != nil {
					p.s.Log.ERROR.Printf("Failed to resolve output formats: %s", err)
				} else {
					p.outputFormats = outFormats
					p.params[loki] = outFormats
				}

			}
		case "draft":
			draft = new(bool)
			*draft = cast.ToBool(v)
		case "layout":
			p.Layout = cast.ToString(v)
			p.params[loki] = p.Layout
		case "markup":
			p.Markup = cast.ToString(v)
			p.params[loki] = p.Markup
		case "weight":
			p.Weight = cast.ToInt(v)
			p.params[loki] = p.Weight
		case "aliases":
			p.Aliases = cast.ToStringSlice(v)
			for _, alias := range p.Aliases {
				if strings.HasPrefix(alias, "http://") || strings.HasPrefix(alias, "https://") {
					return fmt.Errorf("Only relative aliases are supported, %v provided", alias)
				}
			}
			p.params[loki] = p.Aliases
		case "status":
			p.Status = cast.ToString(v)
			p.params[loki] = p.Status
		case "sitemap":
			p.Sitemap = parseSitemap(cast.ToStringMap(v))
			p.params[loki] = p.Sitemap
		case "iscjklanguage":
			isCJKLanguage = new(bool)
			*isCJKLanguage = cast.ToBool(v)
		case "translationkey":
			p.translationKey = cast.ToString(v)
			p.params[loki] = p.translationKey
		case "resources":
			var resources []map[string]interface{}
			handled := true

			switch vv := v.(type) {
			case []map[interface{}]interface{}:
				for _, vvv := range vv {
					resources = append(resources, cast.ToStringMap(vvv))
				}
			case []map[string]interface{}:
				resources = append(resources, vv...)
case []interface{}: for _, vvv := range vv { switch vvvv := vvv.(type) { case map[interface{}]interface{}: resources = append(resources, cast.ToStringMap(vvvv)) case map[string]interface{}: resources = append(resources, vvvv) } } default: handled = false } if handled { p.params[loki] = resources p.resourcesMetadata = resources break } fallthrough default: // If not one of the explicit values, store in Params switch vv := v.(type) { case bool: p.params[loki] = vv case string: p.params[loki] = vv case int64, int32, int16, int8, int: p.params[loki] = vv case float64, float32: p.params[loki] = vv case time.Time: p.params[loki] = vv default: // handle array of strings as well switch vvv := vv.(type) { case []interface{}: if len(vvv) > 0 { switch vvv[0].(type) { case map[interface{}]interface{}: // Proper parsing structured array from YAML based FrontMatter p.params[loki] = vvv case map[string]interface{}: // Proper parsing structured array from JSON based FrontMatter p.params[loki] = vvv case []interface{}: p.params[loki] = vvv default: a := make([]string, len(vvv)) for i, u := range vvv { a[i] = cast.ToString(u) } p.params[loki] = a } } else { p.params[loki] = []string{} } default: p.params[loki] = vv } } } } // Try markup explicitly set in the frontmatter p.Markup = helpers.GuessType(p.Markup) if p.Markup == "unknown" { // Fall back to file extension (might also return "unknown") p.Markup = helpers.GuessType(p.Ext()) } if draft != nil && published != nil { p.Draft = *draft p.s.Log.WARN.Printf("page %q has both draft and published settings in its frontmatter. Using draft.", p.File.Path()) } else if draft != nil { p.Draft = *draft } else if published != nil { p.Draft = !*published } p.params["draft"] = p.Draft if isCJKLanguage != nil { p.isCJKLanguage = *isCJKLanguage } else if p.s.Cfg.GetBool("hasCJKLanguage") { if cjk.Match(p.source.parsed.Input()) { p.isCJKLanguage = true } else { p.isCJKLanguage = false } } p.params["iscjklanguage"] = p.isCJKLanguage return nil } func (p *Page) GetParam(key string) interface{} { return p.getParam(key, false) } func (p *Page) getParamToLower(key string) interface{} { return p.getParam(key, true) } func (p *Page) getParam(key string, stringToLower bool) interface{} { v := p.params[strings.ToLower(key)] if v == nil { return nil } switch val := v.(type) { case bool: return val case string: if stringToLower { return strings.ToLower(val) } return val case int64, int32, int16, int8, int: return cast.ToInt(v) case float64, float32: return cast.ToFloat64(v) case time.Time: return val case []string: if stringToLower { return helpers.SliceToLower(val) } return v case map[string]interface{}: // JSON and TOML return v case map[interface{}]interface{}: // YAML return v } p.s.Log.ERROR.Printf("GetParam(\"%s\"): Unknown type %s\n", key, reflect.TypeOf(v)) return nil } func (p *Page) HasMenuCurrent(menuID string, me *MenuEntry) bool { sectionPagesMenu := p.Site.sectionPagesMenu // page is labeled as "shadow-member" of the menu with the same identifier as the section if sectionPagesMenu != "" { section := p.Section() if section != "" && sectionPagesMenu == menuID && section == me.Identifier { return true } } if !me.HasChildren() { return false } menus := p.Menus() if m, ok := menus[menuID]; ok { for _, child := range me.Children { if child.IsEqual(m) { return true } if p.HasMenuCurrent(menuID, child) { return true } } } if p.IsPage() { return false } // The following logic is kept from back when Hugo had both Page and Node types. 
	// TODO(bep) consolidate / clean
	nme := MenuEntry{Page: p, Name: p.title, URL: p.URL()}

	for _, child := range me.Children {
		if nme.IsSameResource(child) {
			return true
		}
		if p.HasMenuCurrent(menuID, child) {
			return true
		}
	}

	return false
}

func (p *Page) IsMenuCurrent(menuID string, inme *MenuEntry) bool {

	menus := p.Menus()

	if me, ok := menus[menuID]; ok {
		if me.IsEqual(inme) {
			return true
		}
	}

	if p.IsPage() {
		return false
	}

	// The following logic is kept from back when Hugo had both Page and Node types.
	// TODO(bep) consolidate / clean
	me := MenuEntry{Page: p, Name: p.title, URL: p.URL()}

	if !me.IsSameResource(inme) {
		return false
	}

	// this resource may be included in several menus
	// search for it to make sure that it is in the menu with the given menuId
	if menu, ok := (*p.Site.Menus)[menuID]; ok {
		for _, menuEntry := range *menu {
			if menuEntry.IsSameResource(inme) {
				return true
			}

			descendantFound := p.isSameAsDescendantMenu(inme, menuEntry)
			if descendantFound {
				return descendantFound
			}
		}
	}

	return false
}

func (p *Page) isSameAsDescendantMenu(inme *MenuEntry, parent *MenuEntry) bool {
	if parent.HasChildren() {
		for _, child := range parent.Children {
			if child.IsSameResource(inme) {
				return true
			}
			descendantFound := p.isSameAsDescendantMenu(inme, child)
			if descendantFound {
				return descendantFound
			}
		}
	}
	return false
}

func (p *Page) Menus() PageMenus {
	p.pageMenusInit.Do(func() {
		p.pageMenus = PageMenus{}

		if ms, ok := p.params["menu"]; ok {
			link := p.RelPermalink()

			me := MenuEntry{Page: p, Name: p.LinkTitle(), Weight: p.Weight, URL: link}

			// Could be the name of the menu to attach it to
			mname, err := cast.ToStringE(ms)

			if err == nil {
				me.Menu = mname
				p.pageMenus[mname] = &me
				return
			}

			// Could be a slice of strings
			mnames, err := cast.ToStringSliceE(ms)

			if err == nil {
				for _, mname := range mnames {
					me.Menu = mname
					p.pageMenus[mname] = &me
				}
				return
			}

			// Could be a structured menu entry
			menus, err := cast.ToStringMapE(ms)

			if err != nil {
				p.s.Log.ERROR.Printf("unable to process menus for %q\n", p.title)
			}

			for name, menu := range menus {
				menuEntry := MenuEntry{Page: p, Name: p.LinkTitle(), URL: link, Weight: p.Weight, Menu: name}
				if menu != nil {
					p.s.Log.DEBUG.Printf("found menu: %q, in %q\n", name, p.title)
					ime, err := cast.ToStringMapE(menu)
					if err != nil {
						p.s.Log.ERROR.Printf("unable to process menus for %q: %s", p.title, err)
					}

					menuEntry.marshallMap(ime)
				}
				p.pageMenus[name] = &menuEntry
			}
		}
	})

	return p.pageMenus
}

func (p *Page) shouldRenderTo(f output.Format) bool {
	_, found := p.outputFormats.GetByName(f.Name)
	return found
}

// RawContent returns the un-rendered source content without
// any leading front matter.
func (p *Page) RawContent() string {
	if p.source.posMainContent == -1 {
		return ""
	}

	return string(p.source.parsed.Input()[p.source.posMainContent:])
}

func (p *Page) FullFilePath() string {
	return filepath.Join(p.Dir(), p.LogicalName())
}

// Returns the canonical, absolute, fully-qualified logical reference used by
// methods such as GetPage and ref/relref shortcodes to refer to
// this page. It is prefixed with a "/".
//
// For pages that have a source file, it returns the path to this file as an
// absolute path rooted in this site's content dir.
// For pages that do not (sections without a content page etc.), it returns the
// virtual path, consistent with where you would add a source file.
func (p *Page) absoluteSourceRef() string {
	if p.File != nil {
		sourcePath := p.Path()
		if sourcePath != "" {
			return "/" + filepath.ToSlash(sourcePath)
		}
	}

	if len(p.sections) > 0 {
		// no backing file, return the virtual source path
		return "/" + path.Join(p.sections...)
	}

	return ""
}

// Pre-render preparation steps.

func (p *Page) prepareLayouts() error {
	// TODO(bep): Check the IsRenderable logic.
	if p.Kind == KindPage {
		if !p.IsRenderable() {
			self := "__" + p.UniqueID()
			err := p.s.TemplateHandler().AddLateTemplate(self, string(p.content()))
			if err != nil {
				return err
			}
			p.selfLayout = self
		}
	}

	return nil
}

func (p *Page) prepareData(s *Site) error {
	if p.Kind != KindSection {
		var pages Pages
		p.data = make(map[string]interface{})

		switch p.Kind {
		case KindPage:
		case KindHome:
			pages = s.RegularPages
		case KindTaxonomy:
			plural := p.sections[0]
			term := p.sections[1]

			if s.Info.preserveTaxonomyNames {
				if v, ok := s.taxonomiesOrigKey[fmt.Sprintf("%s-%s", plural, term)]; ok {
					term = v
				}
			}

			singular := s.taxonomiesPluralSingular[plural]
			taxonomy := s.Taxonomies[plural].Get(term)

			p.data[singular] = taxonomy
			p.data["Singular"] = singular
			p.data["Plural"] = plural
			p.data["Term"] = term
			pages = taxonomy.Pages()
		case KindTaxonomyTerm:
			plural := p.sections[0]
			singular := s.taxonomiesPluralSingular[plural]

			p.data["Singular"] = singular
			p.data["Plural"] = plural
			p.data["Terms"] = s.Taxonomies[plural]
			// keep the following just for legacy reasons
			p.data["OrderedIndex"] = p.data["Terms"]
			p.data["Index"] = p.data["Terms"]

			// A list of all KindTaxonomy pages with matching plural
			for _, p := range s.findPagesByKind(KindTaxonomy) {
				if p.sections[0] == plural {
					pages = append(pages, p)
				}
			}
		}

		p.data["Pages"] = pages
		p.Pages = pages
	}

	// Now we know enough to set missing dates on home page etc.
	p.updatePageDates()

	return nil
}

func (p *Page) updatePageDates() {
	// TODO(bep) there is a potential issue with page sorting for home pages
	// etc. without front matter dates set, but let us wrap our heads around
	// that another time.
	if !p.IsNode() {
		return
	}

	if !p.Date.IsZero() {
		if p.Lastmod.IsZero() {
			p.Lastmod = p.Date
		}
		return
	} else if !p.Lastmod.IsZero() {
		if p.Date.IsZero() {
			p.Date = p.Lastmod
		}
		return
	}

	// Set it to the first non-zero date in children
	var foundDate, foundLastMod bool

	for _, child := range p.Pages {
		if !child.Date.IsZero() {
			p.Date = child.Date
			foundDate = true
		}
		if !child.Lastmod.IsZero() {
			p.Lastmod = child.Lastmod
			foundLastMod = true
		}

		if foundDate && foundLastMod {
			break
		}
	}
}

// copy creates a copy of this page with the lazy sync.Once vars reset
// so they will be evaluated again, for word count calculations etc.
func (p *Page) copy(initContent bool) *Page {
	p.contentInitMu.Lock()
	c := *p
	p.contentInitMu.Unlock()

	c.pageInit = &pageInit{}
	if initContent {
		if len(p.outputFormats) < 2 {
			panic(fmt.Sprintf("programming error: page %q should not need to rebuild content as it has only %d outputs", p.Path(), len(p.outputFormats)))
		}
		c.pageContentInit = &pageContentInit{}
	}
	return &c
}

func (p *Page) Hugo() *HugoInfo {
	return hugoInfo
}

// GetPage looks up a page for the given ref.
//    {{ with .GetPage "blog" }}{{ .Title }}{{ end }}
//
// This will return nil when no page could be found, and will return
// an error if the ref is ambiguous.
func (p *Page) GetPage(ref string) (*Page, error) { return p.s.getPageNew(p, ref) } type refArgs struct { Path string Lang string OutputFormat string } func (p *Page) decodeRefArgs(args map[string]interface{}) (refArgs, *SiteInfo, error) { var ra refArgs err := mapstructure.WeakDecode(args, &ra) if err != nil { return ra, nil, nil } s := p.Site if ra.Lang != "" && ra.Lang != p.Lang() { // Find correct site found := false for _, ss := range p.s.owner.Sites { if ss.Lang() == ra.Lang { found = true s = &ss.Info } } if !found { p.s.siteRefLinker.logNotFound(ra.Path, fmt.Sprintf("no site found with lang %q", ra.Lang), p) return ra, nil, nil } } return ra, s, nil } func (p *Page) Ref(argsm map[string]interface{}) (string, error) { args, s, err := p.decodeRefArgs(argsm) if err != nil { return "", _errors.Wrap(err, "invalid arguments to Ref") } if s == nil { return p.s.siteRefLinker.notFoundURL, nil } if args.Path == "" { return "", nil } if args.OutputFormat != "" { return s.Ref(args.Path, p, args.OutputFormat) } return s.Ref(args.Path, p) } func (p *Page) RelRef(argsm map[string]interface{}) (string, error) { args, s, err := p.decodeRefArgs(argsm) if err != nil { return "", _errors.Wrap(err, "invalid arguments to Ref") } if s == nil { return p.s.siteRefLinker.notFoundURL, nil } if args.Path == "" { return "", nil } if args.OutputFormat != "" { return s.RelRef(args.Path, p, args.OutputFormat) } return s.RelRef(args.Path, p) } func (p *Page) String() string { if sourceRef := p.absoluteSourceRef(); sourceRef != "" { return fmt.Sprintf("Page(%s)", sourceRef) } return fmt.Sprintf("Page(%q)", p.title) } // Scratch returns the writable context associated with this Page. func (p *Page) Scratch() *maps.Scratch { if p.scratch == nil { p.scratch = maps.NewScratch() } return p.scratch } func (p *Page) Language() *langs.Language { p.initLanguage() return p.language } func (p *Page) Lang() string { // When set, Language can be different from lang in the case where there is a // content file (doc.sv.md) with language indicator, but there is no language // config for that language. Then the language will fall back on the site default. if p.Language() != nil { return p.Language().Lang } return p.lang } func (p *Page) isNewTranslation(candidate *Page) bool { if p.Kind != candidate.Kind { return false } if p.Kind == KindPage || p.Kind == kindUnknown { panic("Node type not currently supported for this op") } // At this point, we know that this is a traditional Node (home page, section, taxonomy) // It represents the same node, but different language, if the sections is the same. if len(p.sections) != len(candidate.sections) { return false } for i := 0; i < len(p.sections); i++ { if p.sections[i] != candidate.sections[i] { return false } } // Finally check that it is not already added. 
for _, translation := range p.translations { if candidate == translation { return false } } return true } func (p *Page) shouldAddLanguagePrefix() bool { if !p.Site.IsMultiLingual() { return false } if p.s.owner.IsMultihost() { return true } if p.Lang() == "" { return false } if !p.Site.defaultContentLanguageInSubdir && p.Lang() == p.s.multilingual().DefaultLang.Lang { return false } return true } func (p *Page) initLanguage() { p.languageInit.Do(func() { if p.language != nil { return } ml := p.s.multilingual() if ml == nil { panic("Multilanguage not set") } if p.lang == "" { p.lang = ml.DefaultLang.Lang p.language = ml.DefaultLang return } language := ml.Language(p.lang) if language == nil { // It can be a file named stefano.chiodino.md. p.s.Log.WARN.Printf("Page language (if it is that) not found in multilang setup: %s.", p.lang) language = ml.DefaultLang } p.language = language }) } func (p *Page) LanguagePrefix() string { return p.Site.LanguagePrefix } func (p *Page) addLangPathPrefixIfFlagSet(outfile string, should bool) string { if helpers.IsAbsURL(outfile) { return outfile } if !should { return outfile } hadSlashSuffix := strings.HasSuffix(outfile, "/") outfile = "/" + path.Join(p.Lang(), outfile) if hadSlashSuffix { outfile += "/" } return outfile } func sectionsFromFile(fi *fileInfo) []string { dirname := fi.Dir() dirname = strings.Trim(dirname, helpers.FilePathSeparator) if dirname == "" { return nil } parts := strings.Split(dirname, helpers.FilePathSeparator) if fi.bundleTp == bundleLeaf && len(parts) > 0 { // my-section/mybundle/index.md => my-section return parts[:len(parts)-1] } return parts } func kindFromFileInfo(fi *fileInfo) string { if fi.TranslationBaseName() == "_index" { if fi.Dir() == "" { return KindHome } // Could be index for section, taxonomy, taxonomy term // We don't know enough yet to determine which return kindUnknown } return KindPage } func (p *Page) setValuesForKind(s *Site) { if p.Kind == kindUnknown { // This is either a taxonomy list, taxonomy term or a section nodeType := s.kindFromSections(p.sections) if nodeType == kindUnknown { panic(fmt.Sprintf("Unable to determine page kind from %q", p.sections)) } p.Kind = nodeType } switch p.Kind { case KindHome: p.URLPath.URL = "/" case KindPage: default: if p.URLPath.URL == "" { p.URLPath.URL = "/" + path.Join(p.sections...) + "/" } } } // Used in error logs. func (p *Page) pathOrTitle() string { if p.Filename() != "" { return p.Filename() } return p.title } func (p *Page) Next() *Page { // TODO Remove the deprecation notice (but keep PrevPage as an alias) Hugo 0.52 helpers.Deprecated("Page", ".Next", "Use .PrevPage (yes, not .NextPage).", false) return p.PrevPage } func (p *Page) Prev() *Page { // TODO Remove the deprecation notice (but keep NextPage as an alias) Hugo 0.52 helpers.Deprecated("Page", ".Prev", "Use .NextPage (yes, not .PrevPage).", false) return p.NextPage }
{ // This is a new language. p.shortcodeState.clearDelta() }
selection-cell.component.ts
import { Component } from '@angular/core'; import { ColumnMode, SelectionType } from 'projects/swimlane/ngx-datatable/src/public-api'; @Component({ selector: 'cell-selection-demo', template: ` <div> <h3> Cell Selection <small> <a href="https://github.com/swimlane/ngx-datatable/blob/master/src/app/selection/selection-cell.component.ts" target="_blank" > Source </a> </small> </h3> <bks-datatable class="material selection-cell" [rows]="rows" [columnMode]="ColumnMode.force" [columns]="columns" [headerHeight]="50" [footerHeight]="50" [rowHeight]="50" [selected]="selected" [selectionType]="SelectionType.cell" (select)="onSelect($event)" (activate)="onActivate($event)" > </bks-datatable> </div> ` }) export class
{ rows: any[] = []; selected: any[] = []; columns: any[] = [{ prop: 'name' }, { name: 'Company' }, { name: 'Gender' }]; ColumnMode = ColumnMode; SelectionType = SelectionType; constructor() { this.fetch(data => { this.rows = data; }); } fetch(cb) { const req = new XMLHttpRequest(); req.open('GET', `assets/data/company.json`); req.onload = () => { cb(JSON.parse(req.response)); }; req.send(); } onSelect(event) { console.log('Event: select', event, this.selected); } onActivate(event) { console.log('Event: activate', event); } }
CellSelectionComponent
forgery.py
import copy import json import ssl import flexssl import re import base64 import subprocess import os import gzip import random from bson import json_util from py_mini_racer import py_mini_racer import utils.redis_utils import proxy_modules.utils def handle_connection_close_header(request_breakdown, json_config): try: do_not_modify_connection_header = json_config['do_not_modify_connection_header'] except: do_not_modify_connection_header = False if do_not_modify_connection_header is False: # Default: means force connection close on request ! new_req = copy.deepcopy(request_breakdown) new_req['connection'] = {"present": True, "value_when_present": "close"} return new_req else: # Do not modify ! keep it as is ! return request_breakdown def handle_request_headers(request_breakdown, json_config): try: headers_js_function = json_config['headers'] except: headers_js_function = None if headers_js_function is None: return request_breakdown # Run the function ! clean_function_code = headers_js_function.encode('latin1').decode('unicode-escape').encode('latin1').decode('utf-8') clean_function_code_with_payload = clean_function_code.replace("DATA_ATTRIBUTE_INPUT",json.dumps(request_breakdown['headers'])) ctx = py_mini_racer.MiniRacer() success = True try: function_output = json.loads(ctx.eval(clean_function_code_with_payload)) new_headers = [] for el in function_output: new_headers.append((el[0], el[1])) except Exception as e: success = False del ctx if success: new_req = copy.deepcopy(request_breakdown) new_req['headers'] = copy.deepcopy(new_headers) return new_req else: return request_breakdown def _gen_key_freeze(node, config_id): list_headers = copy.deepcopy(node['headers']) list_headers = [str(x).lower() for x in list_headers] list_headers = sorted(list_headers) hashed_key = node['host_regex'].lower() + '-' + '-'.join(list_headers) hashed_key = base64.b64encode(hashed_key.encode('utf-8')).decode('ascii') return 'type=headers_freeze_data/config_id='+str(config_id)+'/key=' + hashed_key, 'type=headers_freeze_counter/config_id='+str(config_id)+'/key=' + hashed_key def handle_request_headers_freeze(request_breakdown, json_config, config_id): # Init the context if not 'headers_freeze' in json_config: return request_breakdown, 'No actions taken' if request_breakdown['host'] is None: return request_breakdown, 'No actions taken' redis_client = utils.redis_utils.create_redis_client() request_breakdown_res = copy.deepcopy(request_breakdown) # Run the freeze return_msg_logs = [] idx = -1 for node in json_config['headers_freeze']: idx = idx + 1
key_data, key_counter = _gen_key_freeze(node, config_id) stored_val = redis_client.get(key_data) stored_counter = redis_client.get(key_counter) stored_counter = 0 if stored_counter is None else int(stored_counter) # Will cache if relevant if stored_val is None: will_cache = True data_to_freeze = [] for header_i in node['headers']: val_header = proxy_modules.utils.get_header_from_xploded_req(request_breakdown_res, header_i) if val_header is None: will_cache = False elif len(val_header) == 0: will_cache = False else: data_to_freeze.append([header_i, val_header]) if will_cache is True: redis_client.set(key_data, json_util.dumps(data_to_freeze)) redis_client.set(key_counter, 0) return_msg_logs.append('Node idx ' + str(idx) + ': Stored values as headers present and no cached values') else: return_msg_logs.append('Node idx ' + str(idx) + ': No actions taken ! No cached values and no headers present') # Will use cached headers else: redis_client.incr(key_counter) data_to_get = json_util.loads(stored_val) request_breakdown_res = proxy_modules.utils.merge_headers(request_breakdown_res, data_to_get) # Respect max requests ! if ((node['max_requests'] >= 0) and (stored_counter+1 >= node['max_requests'])): redis_client.delete(key_data) redis_client.delete(key_counter) return_msg_logs.append('Node idx ' + str(idx) + ': Used cached data and delete it as it reached maximum used times.') else: return_msg_logs.append('Node idx ' + str(idx) + ': Used cached data !') return request_breakdown_res, '. '.join(return_msg_logs) def handle_ssl_context(json_config): # ------------ Inputs fetch ! # 1.1. Select the version try: version_ssl = json_config['ssl']['version'] except: version_ssl = "PROTOCOL_TLS" # 1.2. Force SSL checks try: verify_ssl = json_config['ssl']['verify_ssl'] except: verify_ssl = False # 1.3. Ciphers try: ciphers_ssl = json_config['ssl']['ciphers'] if (ciphers_ssl == ""): ciphers_ssl = None except: ciphers_ssl = None # 1.4. 
Signatures (not used in HTTP2) try: signatures_ssl = json_config['ssl']['signatures'] if (signatures_ssl == ""): signatures_ssl = None except: signatures_ssl = None used_version = ssl.PROTOCOL_TLS if (version_ssl == "PROTOCOL_TLS"): used_version = ssl.PROTOCOL_TLS if (version_ssl == "PROTOCOL_TLSv1"): used_version = ssl.PROTOCOL_TLSv1 if (version_ssl == "PROTOCOL_TLSv1_1"): used_version = ssl.PROTOCOL_TLSv1_1 if (version_ssl == "PROTOCOL_TLSv1_2"): used_version = ssl.PROTOCOL_TLSv1_2 res_context = ssl.SSLContext(used_version) if (verify_ssl is True): res_context.verify_mode = ssl.CERT_REQUIRED else: res_context.verify_mode = ssl.CERT_NONE if not ciphers_ssl is None: res_context.set_ciphers(ciphers_ssl) if not signatures_ssl is None: flexssl.set_sigalgs(signatures_ssl, res_context) return res_context, { "version_ssl": version_ssl, "verify_ssl": verify_ssl, "ciphers_ssl": ciphers_ssl, "signatures_ssl": signatures_ssl } def handle_http2_request(request_breakdown, json_config): try: http2 = json_config['http2'] except: http2 = "NO" if not http2 in ['NO', 'YES', 'TRY']: http2 = "NO" if http2 == "NO": return False, {} if http2 == "TRY": return False, {} if http2 == "YES": host_to_req = request_breakdown['host'] if host_to_req is None: return False, {} return True, { "headers": request_breakdown['headers'], "command": request_breakdown['request_uri']['__DETAIL_command'], "path": request_breakdown['request_uri']['__DETAIL_path'], "host_connect": "https://" + host_to_req, "data": request_breakdown['data'], } def _delete_pass_file(file_name): try: os.remove(file_name) except: pass def proxy_js_function(js_function_code): # Create the JS file file_tmp_to_run = "/tmp/js_proxy_node_" + str(random.randint(0,1000000000000000)) + '.js' _delete_pass_file(file_tmp_to_run) file_js_tmp = open(file_tmp_to_run,'w') file_js_tmp.write(js_function_code) file_js_tmp.close() # Run the JS and clean node_js_stdout = b"" node_js_stderr = b"" node_js_exec_err = '' data_carried = None try: node_running_process = subprocess.run(["node", file_tmp_to_run], capture_output=True) node_js_stdout = node_running_process.stdout node_js_stderr = node_running_process.stderr end_res = node_running_process.stdout # Formatting end_res = json.loads(end_res) proxy_host = end_res['host'] if proxy_host is None: proxy_port = 0 else: proxy_port = int(end_res['port']) if (not "type" in end_res): #HTTP by default not to break the existing config type=None else: type=end_res["type"] proxy_user = end_res['user'] proxy_password = end_res['password'] try: data_carried = end_res['data'] except: pass except Exception as e: node_js_exec_err = str(e) end_res = None json_messages_js_run = {"stdout": node_js_stdout.decode('utf-8'), "stderr": node_js_stderr.decode('utf-8'), "execerr": node_js_exec_err, "data_carried" : data_carried} _delete_pass_file(file_tmp_to_run) # Return if end_res is None: return None, None, None,json_messages_js_run if proxy_host is None: json_messages_js_run['note'] = 'host is set to null' return None, None, None, json_messages_js_run else: if (proxy_user is None) and (proxy_password is None): json_messages_js_run['note'] = 'no authentication needed' return (proxy_host, proxy_port), None, type,json_messages_js_run else: return (proxy_host, proxy_port), (proxy_user, proxy_password), type,json_messages_js_run def run_js_function_io(reply, proxy_verbose_messages, js_function_to_run, return_type = 'BINARY'): # return_type can be: 'BINARY' or None # Create the JS file random_pick = str(random.randint(0,1000000000000000)) 
file_tmp_to_run = "/tmp/js_function_io_" + random_pick + '.js' _delete_pass_file(file_tmp_to_run) file_js_tmp = open(file_tmp_to_run,'w') file_js_tmp.write(js_function_to_run) file_js_tmp.close() # Build the Payload header, data = proxy_modules.utils.separate_header_and_body(reply) payload = {} payload['reply'] = {} try: payload['reply']["code"] = proxy_modules.utils.fetch_response_code(reply) except: payload['reply']["code"] = None try: payload['reply']['header'] = header.decode('utf-8') except: payload['reply']['header'] = None try: payload['reply']['data'] = gzip.decompress(data).decode('utf-8') except: try: payload['reply']['data'] = data.decode('utf-8') except: payload['reply']['data'] = None payload['proxy'] = proxy_verbose_messages file_tmp_to_load = "/tmp/js_function_io_" + random_pick + '.json' _delete_pass_file(file_tmp_to_load) file_js_tmp = open(file_tmp_to_load,'w') file_js_tmp.write(json.dumps(payload)) file_js_tmp.close() # Run the JS and clean node_js_stdout = b"" node_js_stderr = b"" node_js_exec_err = '' try: node_running_process = subprocess.run(["node", file_tmp_to_run, file_tmp_to_load], capture_output=True) node_js_stdout = node_running_process.stdout node_js_stderr = node_running_process.stderr except Exception as e: node_js_exec_err = str(e) _delete_pass_file(file_tmp_to_run) _delete_pass_file(file_tmp_to_load) # Wrap up ! if return_type == 'BINARY': if node_js_exec_err == '': try: end_res = int(node_running_process.stdout.decode('utf-8')) except Exception as e: node_js_exec_err = str(e) end_res = 0 else: end_res = 0 if not end_res in [0, 1]: end_res = 0 node_js_exec_err = "Output should be 0 (not blacklisted) or 1 (blacklisted)" json_messages_js_run = {"stdout": node_js_stdout.decode('utf-8'), "stderr": node_js_stderr.decode('utf-8'), "execerr": node_js_exec_err} res = True if end_res == 1 else False, json_messages_js_run return res if return_type is None: return {"stdout": node_js_stdout.decode('utf-8'), "stderr": node_js_stderr.decode('utf-8'), "execerr": node_js_exec_err}
if re.search(node['host_regex'], request_breakdown_res['host']): # Check cached data
__init__.py
from eth_utils import decode_hex class ContractConfigError(Exception):
class InvalidW3Error(Exception):
    pass


class SLContract():
    def __init__(self, node, address=None, provided_abi=None):
        self._contract = None
        self._node = node

        if not self._node.w3.isConnected():
            raise InvalidW3Error('w3 is not connected in the node you passed in to the Contract constructor')

        try:
            if address is None:
                address = self.__class__.__dict__[node.network_name.upper()]
                if address == '':
                    raise
        except:
            raise ContractConfigError(
                'No address given for contract on this network. Did you set the address constant for {}?'.format(
                    node.network_name.upper()
                )
            )

        # If on MAINNET, attempt to resolve
        try:
            # This is the best way to verify the hex string address is actually an address.
            address = node.w3.toChecksumAddress(address)
        except ValueError:
            # if on mainnet, we can attempt to resolve the address if this is really an ENS name.
            if node.network_name.upper() == 'MAINNET':
                address = node.ens.address(address)
                if address is None:
                    raise ContractConfigError('Attempt to resolve contract address from ENS failed.')
            else:
                raise ContractConfigError("Given contract address '{}' does not appear to be valid.".format(address))

        if self.ABI is None and provided_abi is None:
            raise ContractConfigError('Could not open the Dapp contract with the given address and ABI.')

        if provided_abi is None:
            self._contract = node.w3.eth.contract(address, abi=self.ABI)
        else:
            self._contract = node.w3.eth.contract(address, abi=provided_abi)

        if self._contract is None:
            raise ContractConfigError('Could not open the Dapp contract with the given address and ABI.')

    @property
    def w3(self):
        return self._node.w3

    @property
    def node(self):
        return self._node

    @property
    def sha3(self):
        return self._node.w3.sha3

    @property
    def functions(self):
        return self._contract.functions

    @property
    def address(self):
        return self._contract.address

    def toWei(self, amount, denomination):
        return self._node.w3.toWei(amount, denomination)

    def fromWei(self, amount, denomination):
        return self._node.w3.fromWei(amount, denomination)

    def bytes32(self, an_int):
        return (an_int).to_bytes(32, byteorder='big')

    def to_sol_addr(self, address):
        return decode_hex(address.replace('0x', ''))

    def to_bytes_32(self, value):
        return self.bytes32(value)
pass
chart-afr.js
/******/ (function(modules) { // webpackBootstrap /******/ // The module cache /******/ var installedModules = {}; /******/ /******/ // The require function /******/ function
(moduleId) { /******/ /******/ // Check if module is in cache /******/ if(installedModules[moduleId]) { /******/ return installedModules[moduleId].exports; /******/ } /******/ // Create a new module (and put it into the cache) /******/ var module = installedModules[moduleId] = { /******/ i: moduleId, /******/ l: false, /******/ exports: {} /******/ }; /******/ /******/ // Execute the module function /******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__); /******/ /******/ // Flag the module as loaded /******/ module.l = true; /******/ /******/ // Return the exports of the module /******/ return module.exports; /******/ } /******/ /******/ /******/ // expose the modules object (__webpack_modules__) /******/ __webpack_require__.m = modules; /******/ /******/ // expose the module cache /******/ __webpack_require__.c = installedModules; /******/ /******/ // define getter function for harmony exports /******/ __webpack_require__.d = function(exports, name, getter) { /******/ if(!__webpack_require__.o(exports, name)) { /******/ Object.defineProperty(exports, name, { enumerable: true, get: getter }); /******/ } /******/ }; /******/ /******/ // define __esModule on exports /******/ __webpack_require__.r = function(exports) { /******/ if(typeof Symbol !== 'undefined' && Symbol.toStringTag) { /******/ Object.defineProperty(exports, Symbol.toStringTag, { value: 'Module' }); /******/ } /******/ Object.defineProperty(exports, '__esModule', { value: true }); /******/ }; /******/ /******/ // create a fake namespace object /******/ // mode & 1: value is a module id, require it /******/ // mode & 2: merge all properties of value into the ns /******/ // mode & 4: return value when already ns object /******/ // mode & 8|1: behave like require /******/ __webpack_require__.t = function(value, mode) { /******/ if(mode & 1) value = __webpack_require__(value); /******/ if(mode & 8) return value; /******/ if((mode & 4) && typeof value === 'object' && value && value.__esModule) return value; /******/ var ns = Object.create(null); /******/ __webpack_require__.r(ns); /******/ Object.defineProperty(ns, 'default', { enumerable: true, value: value }); /******/ if(mode & 2 && typeof value != 'string') for(var key in value) __webpack_require__.d(ns, key, function(key) { return value[key]; }.bind(null, key)); /******/ return ns; /******/ }; /******/ /******/ // getDefaultExport function for compatibility with non-harmony modules /******/ __webpack_require__.n = function(module) { /******/ var getter = module && module.__esModule ? /******/ function getDefault() { return module['default']; } : /******/ function getModuleExports() { return module; }; /******/ __webpack_require__.d(getter, 'a', getter); /******/ return getter; /******/ }; /******/ /******/ // Object.prototype.hasOwnProperty.call /******/ __webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); }; /******/ /******/ // __webpack_public_path__ /******/ __webpack_require__.p = "/"; /******/ /******/ /******/ // Load entry module and return exports /******/ return __webpack_require__(__webpack_require__.s = 25); /******/ }) /************************************************************************/ /******/ ({ /***/ "./resources/js/chart-afr.js": /*!***********************************!*\ !*** ./resources/js/chart-afr.js ***! \***********************************/ /*! 
no static exports found */ /***/ (function(module, exports) { // Prepare demo data // Data is joined to map using value of 'hc-key' property by default. // See API docs for 'joinBy' for more info on linking data and map. var data = [['ug', 0], ['ng', 0], ['st', 0], ['tz', 20], ['sl', 0], ['gw', 0], ['cv', 0], ['sc', 0], ['tn', 0], ['mg', 0], ['ke', 20], ['cd', 0], ['fr', 0], ['mr', 0], ['dz', 0], ['er', 0], ['gq', 0], ['mu', 0], ['sn', 0], ['km', 0], ['et', 0], ['ci', 0], ['gh', 0], ['zm', 20], ['na', 20], ['rw', 0], ['sx', 0], ['so', 0], ['cm', 0], ['cg', 0], ['eh', 0], ['bj', 0], ['bf', 0], ['tg', 0], ['ne', 0], ['ly', 0], ['lr', 0], ['mw', 0], ['gm', 0], ['td', 0], ['ga', 0], ['dj', 0], ['bi', 0], ['ao', 20], ['gn', 0], ['zw', 20], ['za', 20], ['mz', 20], ['sz', 0], ['ml', 0], ['bw', 20], ['sd', 0], ['ma', 0], ['eg', 0], ['ls', 0], ['ss', 0], ['cf', 0]]; // Create the chart Highcharts.mapChart('africa', { chart: { map: 'custom/africa', colors: '#000000' }, title: { text: '' }, subtitle: { text: '' }, mapNavigation: { enabled: false, buttonOptions: { verticalAlign: 'bottom' } }, colorAxis: { min: 0, minColor: '#f9e2d7', maxColor: '#f26522' }, label: { style: { cursor: 'pointer' } }, series: [{ data: data, name: 'Random data', states: { hover: { color: '#eeeeee' } }, dataLabels: { enabled: false, format: '{point.name}' } }] }); /***/ }), /***/ 25: /*!*****************************************!*\ !*** multi ./resources/js/chart-afr.js ***! \*****************************************/ /*! no static exports found */ /***/ (function(module, exports, __webpack_require__) { module.exports = __webpack_require__(/*! /Users/stephenmokgosi/landisa/resources/js/chart-afr.js */"./resources/js/chart-afr.js"); /***/ }) /******/ });
__webpack_require__
cod.js
let parent = null;
let doubleClickLocationDisplay = null;
let cells = [];

window.addEventListener("load", function() {
    parent = document.createElement("div");
    parent.id = "parinte";
    parent.appendChild(createInputGroup());
    parent.addEventListener("click", function onClickParent() {
        // Don't add the cells more than once
        parent.removeEventListener("click", onClickParent);
        setTimeout(createCells, 5000);
    });
    document.body.appendChild(parent);

    doubleClickLocationDisplay = document.createElement("div");
    document.body.appendChild(doubleClickLocationDisplay);

    document.body.addEventListener("keypress", function onKeyPress1(event) {
        if (event.key === "c") {
            // After the first key press, disable this functionality
            document.body.removeEventListener("keypress", onKeyPress1);

            // Every 3 seconds, randomly change the color of one cell.
            let handle = setInterval(changeCellColor, 3000);

            document.body.addEventListener("keypress", function onKeyPress2(event) {
                if (event.key === "s") {
                    document.body.removeEventListener("keypress", onKeyPress2);

                    // Stop randomly coloring the cells
                    clearInterval(handle);

                    // Reset their background color to the one from the CSS
                    resetCellColors();
                }
            });
        }
    });
});

/** Helper functions for localStorage */

function getN() {
    let value = localStorage.getItem("N");
    if (value === null) {
        value = "6";
        setN(value);
    }
    return value;
}

function setN(value) {
    localStorage.setItem("N", value);
}

/** Helper functions for creating the page components */

function createRadioButton(name, value, checked) {
    let radioButton = document.createElement("input");
    radioButton.type = "radio";
    radioButton.name = name;
    radioButton.value = value;
    radioButton.checked = checked;
    return radioButton;
}

function createInputGroup() {
    let N = getN();

    let inputGroup = document.createElement("div");
    const radioName = "N";
    for (let i = 4; i <= 8; ++i) {
        let radioButton = createRadioButton(radioName, i, i.toString() === N);
        radioButton.addEventListener("click", function(event) {
            console.log(`Click on radio button #${i}`);
            setN(event.target.value);
            inputGroup.remove();
        });
        inputGroup.appendChild(radioButton);
    }
    return inputGroup;
}

function createCells() {
    let N = getN();
    for (let i = 0; i < N; ++i) {
        let row = document.createElement("div");
        row.style.display = "flex";
        for (let j = 0; j < N; ++j) {
            let cell = createCell(i, j);
            row.appendChild(cell);
        }
        parent.appendChild(row);
    }
}

function createCell(i, j) {
    let cell = document.createElement("div");
    cell.className = "celula";

    const CELL_SIZE = "80px";
    cell.style.width = CELL_SIZE;
    cell.style.height = CELL_SIZE;

    cell.addEventListener("dblclick", function(event) {
        console.log(`Double click on the cell at row ${i}, column ${j}`);
        cell.textContent = `(${i}, ${j})`;
        doubleClickLocationDisplay.textContent = `Double click coordinates: ${event.x}, ${event.y}`;
        event.stopPropagation();
    });
    cell.addEventListener("click", function(event) {
        event.stopPropagation();
    });

    // Add to the list so the cells can be randomly colored later
    cells.push(cell);

    return cell;
}

/** Helper function that generates random integers in the half-open interval [a, b). */
function randomInt(a, b) {
    return Math.floor(a + Math.random() * (b - a));
}

let currentCell = 0;

function changeCellColor() {
    let N = getN();

    console.log(`Changing the color of cell #${currentCell}`);

    let cell = cells[currentCell];
    let r = randomInt(0, 255);
    let g = randomInt(0, 255);
    let b = randomInt(0, 255);
    cell.style.backgroundColor = `rgb(${r}, ${g}, ${b})`;

    currentCell = (currentCell + 1) % (N * N);
}

function resetCellColors() {
"Resetez culorile celulelor"); for (let cell of cells) { cell.style.backgroundColor = ""; } }
console.log(
key_pair_test.go
package eygo import ( "encoding/json" "strconv" "testing" ) func TestNewKeyPairService(t *testing.T) { driver := NewMockDriver() service := NewKeyPairService(driver) t.Run("it is configured with the given driver", func(t *testing.T) { if service.Driver != driver { t.Errorf("Expected the service to use the given driver") } }) } func TestKeyPairService_All(t *testing.T) { driver := NewMockDriver() service := NewKeyPairService(driver) t.Run("when there are matching keyPairs", func(t *testing.T) { keyPair1 := &KeyPair{ID: 1} keyPair2 := &KeyPair{ID: 2} keyPair3 := &KeyPair{ID: 3} stubKeyPairs(driver, keyPair1, keyPair2, keyPair3) all := service.All(nil) t.Run("it contains all matching keyPairs", func(t *testing.T) { keyPairs := []*KeyPair{keyPair1, keyPair2, keyPair3} if len(all) != len(keyPairs) { t.Errorf("Expected %d keyPairs, got %d", len(keyPairs), len(all))
for _, other := range all {
					if keyPair.ID == other.ID {
						found = true
					}
				}

				if !found {
					t.Errorf("KeyPair %d was not present", keyPair.ID)
				}
			}
		})
	})

	t.Run("when there are no matching keyPairs", func(t *testing.T) {
		driver.Reset()

		t.Run("it is empty", func(t *testing.T) {
			all := service.All(nil)
			if len(all) != 0 {
				t.Errorf("Expected 0 keyPairs, got %d", len(all))
			}
		})
	})
}

func TestKeyPairService_ForUser(t *testing.T) {
	user := &User{ID: "1"}
	driver := NewMockDriver()
	service := NewKeyPairService(driver)

	t.Run("when there are matching keyPairs", func(t *testing.T) {
		keyPair1 := &KeyPair{ID: 1}
		keyPair2 := &KeyPair{ID: 2}
		keyPair3 := &KeyPair{ID: 3}

		stubUserKeyPairs(driver, user, keyPair1, keyPair2, keyPair3)
		all := service.ForUser(user, nil)

		t.Run("it contains all matching keyPairs", func(t *testing.T) {
			keyPairs := []*KeyPair{keyPair1, keyPair2, keyPair3}

			if len(all) != len(keyPairs) {
				t.Errorf("Expected %d keyPairs, got %d", len(keyPairs), len(all))
			}

			for _, keyPair := range keyPairs {
				found := false

				for _, other := range all {
					if keyPair.ID == other.ID {
						found = true
					}
				}

				if !found {
					t.Errorf("KeyPair %d was not present", keyPair.ID)
				}
			}
		})
	})

	t.Run("when there are no matching keyPairs", func(t *testing.T) {
		driver.Reset()

		t.Run("it is empty", func(t *testing.T) {
			all := service.ForUser(user, nil)
			if len(all) != 0 {
				t.Errorf("Expected 0 keyPairs, got %d", len(all))
			}
		})
	})
}

func TestKeyPairService_ForEnvironment(t *testing.T) {
	environment := &Environment{ID: 1, Name: "Environment 1"}
	driver := NewMockDriver()
	service := NewKeyPairService(driver)

	t.Run("when there are matching keyPairs", func(t *testing.T) {
		keyPair1 := &KeyPair{ID: 1}
		keyPair2 := &KeyPair{ID: 2}
		keyPair3 := &KeyPair{ID: 3}

		stubEnvironmentKeyPairs(driver, environment, keyPair1, keyPair2, keyPair3)
		all := service.ForEnvironment(environment, nil)

		t.Run("it contains all matching keyPairs", func(t *testing.T) {
			keyPairs := []*KeyPair{keyPair1, keyPair2, keyPair3}

			if len(all) != len(keyPairs) {
				t.Errorf("Expected %d keyPairs, got %d", len(keyPairs), len(all))
			}

			for _, keyPair := range keyPairs {
				found := false

				for _, other := range all {
					if keyPair.ID == other.ID {
						found = true
					}
				}

				if !found {
					t.Errorf("KeyPair %d was not present", keyPair.ID)
				}
			}
		})
	})

	t.Run("when there are no matching keyPairs", func(t *testing.T) {
		driver.Reset()

		t.Run("it is empty", func(t *testing.T) {
			all := service.ForEnvironment(environment, nil)
			if len(all) != 0 {
				t.Errorf("Expected 0 keyPairs, got %d", len(all))
			}
		})
	})
}

func TestKeyPairService_ForApplication(t *testing.T) {
	application := &Application{ID: 1, Name: "Application 1"}
	driver := NewMockDriver()
	service := NewKeyPairService(driver)

	t.Run("when there are matching keyPairs", func(t *testing.T) {
		keyPair1 := &KeyPair{ID: 1}
		keyPair2 := &KeyPair{ID: 2}
		keyPair3 := &KeyPair{ID: 3}

		stubApplicationKeyPairs(driver, application, keyPair1, keyPair2, keyPair3)
		all := service.ForApplication(application, nil)

		t.Run("it contains all matching keyPairs", func(t *testing.T) {
			keyPairs := []*KeyPair{keyPair1, keyPair2, keyPair3}

			if len(all) != len(keyPairs) {
				t.Errorf("Expected %d keyPairs, got %d", len(keyPairs), len(all))
			}

			for _, keyPair := range keyPairs {
				found := false

				for _, other := range all {
					if keyPair.ID == other.ID {
						found = true
					}
				}

				if !found {
					t.Errorf("KeyPair %d was not present", keyPair.ID)
				}
			}
		})
	})

	t.Run("when there are no matching keyPairs", func(t *testing.T) {
		driver.Reset()

		t.Run("it is empty", func(t *testing.T) {
			all := service.ForApplication(application, nil)
			if len(all) != 0 {
				t.Errorf("Expected 0 keyPairs, got %d", len(all))
			}
		})
}) } func stubKeyPairs(driver *MockDriver, keyPairs ...*KeyPair) { pages := make([][]byte, 0) wrapper := struct { KeyPairs []*KeyPair `json:"keyPairs,omitempty"` }{KeyPairs: keyPairs} if encoded, err := json.Marshal(&wrapper); err == nil { pages = append(pages, encoded) driver.AddResponse("get", "keypairs", Response{Pages: pages}) } } func stubUserKeyPairs(driver *MockDriver, user *User, keyPairs ...*KeyPair) { pages := make([][]byte, 0) wrapper := struct { KeyPairs []*KeyPair `json:"keypairs,omitempty"` }{KeyPairs: keyPairs} if encoded, err := json.Marshal(&wrapper); err == nil { pages = append(pages, encoded) driver.AddResponse("get", "users/"+user.ID+"/keypairs", Response{Pages: pages}) } } func stubEnvironmentKeyPairs(driver *MockDriver, environment *Environment, keyPairs ...*KeyPair) { pages := make([][]byte, 0) wrapper := struct { KeyPairs []*KeyPair `json:"keypairs,omitempty"` }{KeyPairs: keyPairs} if encoded, err := json.Marshal(&wrapper); err == nil { pages = append(pages, encoded) driver.AddResponse("get", "environments/"+strconv.Itoa(environment.ID)+"/keypairs", Response{Pages: pages}) } } func stubApplicationKeyPairs(driver *MockDriver, application *Application, keyPairs ...*KeyPair) { pages := make([][]byte, 0) wrapper := struct { KeyPairs []*KeyPair `json:"keypairs,omitempty"` }{KeyPairs: keyPairs} if encoded, err := json.Marshal(&wrapper); err == nil { pages = append(pages, encoded) driver.AddResponse("get", "applications/"+strconv.Itoa(application.ID)+"/keypairs", Response{Pages: pages}) } }
} for _, keyPair := range keyPairs { found := false
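The same membership check is repeated verbatim in each test above. A hedged refactoring sketch, assuming it lives in the same package as the suite — assertContainsAll is a hypothetical helper name, not part of the original file:

// assertContainsAll fails the test unless every expected key pair ID
// appears somewhere in the actual result set.
func assertContainsAll(t *testing.T, expected, actual []*KeyPair) {
	t.Helper()
	for _, want := range expected {
		found := false
		for _, got := range actual {
			if want.ID == got.ID {
				found = true
				break
			}
		}
		if !found {
			t.Errorf("KeyPair %d was not present", want.ID)
		}
	}
}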
form.rs
//! Run with //! //! ```not_rust //! cargo run --example form //! ``` use axum::prelude::*; use serde::Deserialize; use std::net::SocketAddr; #[tokio::main] async fn main()
async fn show_form() -> response::Html<&'static str> { response::Html( r#" <!doctype html> <html> <head></head> <body> <form action="/" method="post"> <label for="name"> Enter your name: <input type="text" name="name"> </label> <label> Enter your email: <input type="text" name="email"> </label> <input type="submit" value="Subscribe!"> </form> </body> </html> "#, ) } #[derive(Deserialize, Debug)] struct Input { name: String, email: String, } async fn accept_form(extract::Form(input): extract::Form<Input>) { dbg!(&input); }
{ // Set the RUST_LOG, if it hasn't been explicitly defined if std::env::var("RUST_LOG").is_err() { std::env::set_var("RUST_LOG", "form=debug") } tracing_subscriber::fmt::fmt() .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) .init(); // build our application with some routes let app = route("/", get(show_form).post(accept_form)); // run it with hyper let addr = SocketAddr::from(([127, 0, 0, 1], 3000)); tracing::debug!("listening on {}", addr); axum::Server::bind(&addr) .serve(app.into_make_service()) .await .unwrap(); }
client.go
package client import ( "context" "encoding/json" "github.com/apex/log" "github.com/cenkalti/backoff/v4" config2 "github.com/crawlab-team/crawlab-core/config" "github.com/crawlab-team/crawlab-core/constants" "github.com/crawlab-team/crawlab-core/entity" "github.com/crawlab-team/crawlab-core/errors" "github.com/crawlab-team/crawlab-core/grpc/middlewares" "github.com/crawlab-team/crawlab-core/interfaces" "github.com/crawlab-team/crawlab-core/node/config" "github.com/crawlab-team/crawlab-core/utils" grpc2 "github.com/crawlab-team/crawlab-grpc" "github.com/crawlab-team/go-trace" "github.com/spf13/viper" "go.uber.org/dig" "google.golang.org/grpc" "google.golang.org/grpc/connectivity" "io" "os" "sync" "time" ) type Client struct { // dependencies nodeCfgSvc interfaces.NodeConfigService // settings cfgPath string address interfaces.Address timeout time.Duration subscribeType string handleMessage bool // internals conn *grpc.ClientConn stream grpc2.NodeService_SubscribeClient msgCh chan *grpc2.StreamMessage err error // grpc clients ModelDelegateClient grpc2.ModelDelegateClient ModelBaseServiceClient grpc2.ModelBaseServiceClient NodeClient grpc2.NodeServiceClient TaskClient grpc2.TaskServiceClient PluginClient grpc2.PluginServiceClient MessageClient grpc2.MessageServiceClient } func (c *Client) Init() (err error) { // do nothing return nil } func (c *Client) Start() (err error) { // connect if err := c.connect(); err != nil { return err } // register rpc services if err := c.Register(); err != nil { return err } // subscribe if err := c.subscribe(); err != nil { return err } // handle stream message if c.handleMessage { go c.handleStreamMessage() } return nil } func (c *Client) Stop() (err error) { // skip if connection is nil if c.conn == nil { return nil } // grpc server address address := c.address.String() // unsubscribe if err := c.unsubscribe(); err != nil { return err } log.Infof("grpc client unsubscribed from %s", address) // close connection if err := c.conn.Close(); err != nil { return err } log.Infof("grpc client disconnected from %s", address) return nil } func (c *Client) Register() (err error) { // model delegate c.ModelDelegateClient = grpc2.NewModelDelegateClient(c.conn) // model base service c.ModelBaseServiceClient = grpc2.NewModelBaseServiceClient(c.conn) // node c.NodeClient = grpc2.NewNodeServiceClient(c.conn) // task c.TaskClient = grpc2.NewTaskServiceClient(c.conn) // plugin c.PluginClient = grpc2.NewPluginServiceClient(c.conn) // message c.MessageClient = grpc2.NewMessageServiceClient(c.conn) // log log.Infof("[GrpcClient] grpc client registered client services") log.Debugf("[GrpcClient] ModelDelegateClient: %v", c.ModelDelegateClient) log.Debugf("[GrpcClient] ModelBaseServiceClient: %v", c.ModelBaseServiceClient) log.Debugf("[GrpcClient] NodeClient: %v", c.NodeClient) log.Debugf("[GrpcClient] TaskClient: %v", c.TaskClient) log.Debugf("[GrpcClient] PluginClient: %v", c.PluginClient) log.Debugf("[GrpcClient] MessageClient: %v", c.MessageClient) return nil } func (c *Client) GetModelDelegateClient() (res grpc2.ModelDelegateClient) { return c.ModelDelegateClient } func (c *Client) GetModelBaseServiceClient() (res grpc2.ModelBaseServiceClient) { return c.ModelBaseServiceClient } func (c *Client) GetNodeClient() grpc2.NodeServiceClient { return c.NodeClient } func (c *Client) GetTaskClient() grpc2.TaskServiceClient { return c.TaskClient } func (c *Client) GetPluginClient() grpc2.PluginServiceClient { return c.PluginClient } func (c *Client) GetMessageClient() 
grpc2.MessageServiceClient { return c.MessageClient } func (c *Client) SetAddress(address interfaces.Address) { c.address = address } func (c *Client) SetTimeout(timeout time.Duration) { c.timeout = timeout } func (c *Client) SetSubscribeType(value string) { c.subscribeType = value } func (c *Client) SetHandleMessage(handleMessage bool) { c.handleMessage = handleMessage } func (c *Client) Context() (ctx context.Context, cancel context.CancelFunc) { return context.WithTimeout(context.Background(), c.timeout) } func (c *Client) NewRequest(d interface{}) (req *grpc2.Request) { return &grpc2.Request{ NodeKey: c.nodeCfgSvc.GetNodeKey(), Data: c.getRequestData(d), } } func (c *Client) NewPluginRequest(d interface{}) (req *grpc2.PluginRequest) { return &grpc2.PluginRequest{ Name: os.Getenv("CRAWLAB_PLUGIN_NAME"), NodeKey: c.nodeCfgSvc.GetNodeKey(), Data: c.getRequestData(d), } } func (c *Client) GetConfigPath() (path string) { return c.cfgPath } func (c *Client) SetConfigPath(path string) { c.cfgPath = path } func (c *Client) NewModelBaseServiceRequest(id interfaces.ModelId, params interfaces.GrpcBaseServiceParams) (req *grpc2.Request, err error) { data, err := json.Marshal(params) if err != nil { return nil, trace.TraceError(err) } msg := &entity.GrpcBaseServiceMessage{ ModelId: id, Data: data, } return c.NewRequest(msg), nil } func (c *Client) GetMessageChannel() (msgCh chan *grpc2.StreamMessage) { return c.msgCh } func (c *Client) Restart() (err error) { if c.needRestart() { return c.Start() } return nil } func (c *Client) IsStarted() (res bool) { return c.conn != nil } func (c *Client) IsClosed() (res bool) { if c.conn != nil { return c.conn.GetState() == connectivity.Shutdown } return false } func (c *Client) Err() (err error) { return c.err } func (c *Client) GetStream() (stream grpc2.NodeService_SubscribeClient) { return c.stream } func (c *Client) connect() (err error) { return backoff.RetryNotify(c._connect, backoff.NewExponentialBackOff(), utils.BackoffErrorNotify("grpc client connect")) } func (c *Client) _connect() (err error) { // grpc server address address := c.address.String() // timeout context ctx, cancel := context.WithTimeout(context.Background(), c.timeout) defer cancel() // connection // TODO: configure dial options var opts []grpc.DialOption opts = append(opts, grpc.WithInsecure()) opts = append(opts, grpc.WithBlock()) opts = append(opts, grpc.WithChainUnaryInterceptor(middlewares.GetAuthTokenUnaryChainInterceptor(c.nodeCfgSvc))) opts = append(opts, grpc.WithChainStreamInterceptor(middlewares.GetAuthTokenStreamChainInterceptor(c.nodeCfgSvc))) c.conn, err = grpc.DialContext(ctx, address, opts...) 
if err != nil {
		_ = trace.TraceError(err)
		return errors.ErrorGrpcClientFailedToStart
	}

	log.Infof("[GrpcClient] grpc client connected to %s", address)

	return nil
}

func (c *Client) subscribe() (err error) {
	var op func() error
	switch c.subscribeType {
	case constants.GrpcSubscribeTypeNode:
		op = c._subscribeNode
	case constants.GrpcSubscribeTypePlugin:
		op = c._subscribePlugin
	default:
		return errors.ErrorGrpcInvalidType
	}
	return backoff.RetryNotify(op, backoff.NewExponentialBackOff(), utils.BackoffErrorNotify("grpc client subscribe"))
}

func (c *Client) _subscribeNode() (err error) {
	req := c.NewRequest(&entity.NodeInfo{
		Key:      c.nodeCfgSvc.GetNodeKey(),
		IsMaster: false,
	})
	c.stream, err = c.GetNodeClient().Subscribe(context.Background(), req)
	if err != nil {
		return trace.TraceError(err)
	}

	// log
	log.Infof("[GrpcClient] grpc client subscribed to remote server")

	return nil
}

func (c *Client) _subscribePlugin() (err error) {
	req := c.NewPluginRequest(nil)
	c.stream, err = c.GetPluginClient().Subscribe(context.Background(), req)
	if err != nil {
		return trace.TraceError(err)
	}

	// log
	log.Infof("[GrpcClient] grpc client subscribed to remote server")

	return nil
}

func (c *Client) unsubscribe() (err error) {
	req := c.NewRequest(&entity.NodeInfo{
		Key:      c.nodeCfgSvc.GetNodeKey(),
		IsMaster: false,
	})
	if _, err = c.GetNodeClient().Unsubscribe(context.Background(), req); err != nil {
		return trace.TraceError(err)
	}
	return nil
}

func (c *Client) handleStreamMessage() {
	log.Infof("[GrpcClient] start handling stream message...")
	for {
		// resubscribe if stream is set to nil
		if c.stream == nil {
			if err := backoff.RetryNotify(c.subscribe, backoff.NewExponentialBackOff(), utils.BackoffErrorNotify("grpc client subscribe")); err != nil {
				log.Errorf("[GrpcClient] failed to resubscribe: %v", err)
				return
			}
		}

		// receive stream message
		msg, err := c.stream.Recv()
		log.Debugf("[GrpcClient] received message: %v", msg)
		if err != nil {
			// set error
			c.err = err

			// end
			if err == io.EOF {
				log.Infof("[GrpcClient] received EOF signal, disconnecting")
				return
			}

			// connection closed
			if c.IsClosed() {
				return
			}

			// error
			trace.PrintError(err)
			c.stream = nil
			time.Sleep(1 * time.Second)
			continue
		}

		// send stream message to channel
		c.msgCh <- msg

		// reset error
		c.err = nil
	}
}

func (c *Client) needRestart() bool {
	switch c.conn.GetState() {
	case connectivity.Shutdown, connectivity.TransientFailure:
		return true
	case connectivity.Idle, connectivity.Connecting, connectivity.Ready:
		return false
	default:
		return false
	}
}

func (c *Client) getRequestData(d interface{}) (data []byte) {
	if d == nil {
		return data
	}
	switch res := d.(type) {
	case []byte:
		data = res
	default:
		var err error
		data, err = json.Marshal(d)
		if err != nil {
			panic(err)
		}
	}
	return data
}

func NewClient(opts ...Option) (res interfaces.GrpcClient, err error) {
	// client
	client := &Client{
		address: entity.NewAddress(&entity.AddressOptions{
			Host: constants.DefaultGrpcClientRemoteHost,
			Port: constants.DefaultGrpcClientRemotePort,
		}),
		timeout:       10 * time.Second,
		msgCh:         make(chan *grpc2.StreamMessage),
		subscribeType: constants.GrpcSubscribeTypeNode,
		handleMessage: true,
	}

	// apply options
	for _, opt := range opts {
		opt(client)
	}

	// dependency injection
	c := dig.New()
	if err := c.Provide(config.ProvideConfigService(client.GetConfigPath())); err != nil {
		return nil, err
	}
	if err := c.Invoke(func(nodeCfgSvc interfaces.NodeConfigService) {
		client.nodeCfgSvc = nodeCfgSvc
	}); err != nil {
		return nil, err
	}

	// init
	if err := client.Init(); err != nil {
		return nil, err
	}

	return client, nil
}

func ProvideClient(path string, opts
...Option) func() (res interfaces.GrpcClient, err error) { if path == "" { path = config2.DefaultConfigPath } opts = append(opts, WithConfigPath(path)) return func() (res interfaces.GrpcClient, err error) { return NewClient(opts...) } } var clientStore = sync.Map{} func GetClient(path string, opts ...Option) (c interfaces.GrpcClient, err error) { // normalize path if path == "" { path = config2.DefaultConfigPath } log.Debugf("[GetClient] path: %s", path) res, ok := clientStore.Load(path) if !ok { return createClient(path, opts...) } c, ok = res.(interfaces.GrpcClient) if !ok { return createClient(path, opts...) } return c, nil } func ForceGetClient(path string, opts ...Option) (p interfaces.GrpcClient, err error) { return createClient(path, opts...) } func createClient(path string, opts ...Option) (client2 interfaces.GrpcClient, err error)
func ProvideGetClient(path string, opts ...Option) func() (res interfaces.GrpcClient, err error) { return func() (res interfaces.GrpcClient, err error) { return GetClient(path, opts...) } }
{ viperAddress := viper.GetString("grpc.address") if viperAddress != "" { address, err := entity.NewAddressFromString(viperAddress) if err != nil { return nil, err } opts = append(opts, WithAddress(address)) } viperCfgPath := viper.GetString("config.path") if viperCfgPath != "" { opts = append(opts, WithConfigPath(viperCfgPath)) } c := dig.New() if err := c.Provide(ProvideClient(path, opts...)); err != nil { return nil, trace.TraceError(err) } if err := c.Invoke(func(client interfaces.GrpcClient) { client2 = client }); err != nil { return nil, trace.TraceError(err) } clientStore.Store(path, client2) return client2, nil }
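For orientation, a minimal caller-side sketch of the client store above. Assumptions: the snippet lives in the same package, and Start, Stop, and GetMessageChannel (all implemented on *Client here) are reachable through the returned interfaces.GrpcClient, whose definition is not shown in this file.

// Hypothetical usage; error handling abbreviated. An empty path falls back
// to config2.DefaultConfigPath inside GetClient.
func runNodeClient() {
	c, err := GetClient("", WithConfigPath("/path/to/config.json"))
	if err != nil {
		panic(err)
	}
	if err := c.Start(); err != nil { // connect, register service clients, subscribe
		panic(err)
	}
	defer c.Stop()

	// handleStreamMessage pushes received stream messages onto this channel.
	for msg := range c.GetMessageChannel() {
		log.Infof("got stream message: %v", msg)
	}
}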
test_survey_endpoints.py
# -*- coding: utf-8 -*- """ Testing class for survey endpoints of the Castor EDC API Wrapper. Link: https://data.castoredc.com/api#/survey @author: R.C.A. van Linschoten https://orcid.org/0000-0003-3052-596X """ import pytest from exceptions.exceptions import CastorException from tests.test_api_endpoints.data_models import ( survey_model, package_model, survey_package_instance_model, ) def create_survey_package_instance_body(record_id, fake): if fake: random_package = "FAKE" else: random_package = "71C01598-4682-4A4C-90E6-69C0BD38EA47" return { "survey_package_id": random_package, "record_id": record_id, "ccr_patient_id": None, "email_address": "[email protected]", "package_invitation_subject": None, "package_invitation": None, "auto_send": None, "auto_lock_on_finish": None, } class TestSurveyEndpoints: s_model_keys = survey_model.keys() p_model_keys = package_model.keys() i_model_keys = survey_package_instance_model.keys() test_survey = { "id": "D70C1273-B5D8-45CD-BFE8-A0BA75C44B7E", "survey_id": "D70C1273-B5D8-45CD-BFE8-A0BA75C44B7E", "name": "QOL Survey", "description": "", "intro_text": "##### This is the survey intro text. Here you can add some information for the participant that they will see before they start filling in the survey.\n```\n\n\n```\n##### Check the help text in the survey form editor to see how you can format this text or add images and links.\n```\n\n\n```\n### For example, you can use hashtags to make the text bigger or add headings.", "outro_text": "", "survey_steps": [], "_links": { "self": { "href": "https://data.castoredc.com/api/study/D234215B-D956-482D-BF17-71F2BB12A2FD/survey/D70C1273-B5D8-45CD-BFE8-A0BA75C44B7E" } }, } test_survey_package = { "id": "71C01598-4682-4A4C-90E6-69C0BD38EA47", "survey_package_id": "71C01598-4682-4A4C-90E6-69C0BD38EA47", "name": "My first survey package", "description": "", "sender_name": "Castor EDC", "auto_send": False, "allow_step_navigation": True, "show_step_navigator": True, "finish_url": "", "auto_lock_on_finish": False, "intro_text": "```\n\n\n```\n#### To be able to send surveys, you have to create a survey package that will contain the survey(s) you want to send.\n```\n\n\n```\nHere you can add intro text. This is similar to the intro text in a survey itself, but since a survey package can contain multiple surveys, this is a 'general' introduction that appears in the very beginning.", "outro_text": "```\n\n\n```\n#### You can now create your own survey! \n```\n\n\n```\n#### Here is a giphy: \n```\n\n\n```\n![alt text](https://media.giphy.com/media/BUXk0VHa2Weis/giphy.gif).", "default_invitation": 'Dear participant,\n\nYou are participating in the study "Example Study" and we would like to ask you to fill in a survey.\n\nPlease click the link below to complete our survey.\n\n{url}\n\n{logo}', "default_invitation_subject": "Please fill in this survey for Example Study", "sender_email": "[email protected]", "is_mobile": False, "expire_after_hours": None, "_embedded": { "surveys": [ { "id": "D70C1273-B5D8-45CD-BFE8-A0BA75C44B7E", "survey_id": "D70C1273-B5D8-45CD-BFE8-A0BA75C44B7E", "name": "QOL Survey", "description": "", "intro_text": "##### This is the survey intro text. 
Here you can add some information for the participant that they will see before they start filling in the survey.\n```\n\n\n```\n##### Check the help text in the survey form editor to see how you can format this text or add images and links.\n```\n\n\n```\n### For example, you can use hashtags to make the text bigger or add headings.", "outro_text": "", "survey_steps": [], "_links": { "self": { "href": "https://data.castoredc.com/api/study/D234215B-D956-482D-BF17-71F2BB12A2FD/survey/D70C1273-B5D8-45CD-BFE8-A0BA75C44B7E" } }, } ] }, "_links": { "self": { "href": "https://data.castoredc.com/api/study/D234215B-D956-482D-BF17-71F2BB12A2FD/surveypackage/71C01598-4682-4A4C-90E6-69C0BD38EA47" } }, } test_survey_instance = { "id": "115DF660-A00A-4927-9E5F-A07D030D4A09", "survey_package_instance_id": "115DF660-A00A-4927-9E5F-A07D030D4A09", "record_id": "000001", "institute_id": "1CFF5802-0B07-471F-B97E-B5166332F2C5", "institute_name": "Test Institute", "survey_package_name": "My first survey package", "survey_package_id": "71C01598-4682-4A4C-90E6-69C0BD38EA47", "survey_url_string": "DUQKNQNN", "progress": 100, "invitation_subject": "Please fill in this survey for Example Study", "invitation_content": 'Dear participant,\n\nYou are participating in the study "Example Study" and we would like to ask you to fill in a survey.\n\nPlease click the link below to complete our survey.\n\n{url}\n\n{logo}', "created_on": { "date": "2019-10-14 09:42:27.000000", "timezone_type": 3, "timezone": "Europe/Amsterdam", }, "created_by": "B23ABCC4-3A53-FB32-7B78-3960CC907F25", "available_from": { "date": "2019-10-14 09:42:27.000000", "timezone_type": 3, "timezone": "Europe/Amsterdam", }, "expire_on": None, "sent_on": None, "first_opened_on": None, "finished_on": { "date": "2020-08-14 16:27:12.000000", "timezone_type": 3, "timezone": "Europe/Amsterdam", }, "locked": False, "archived": False, "auto_lock_on_finish": False, "auto_send": False, "_embedded": { "record": { "id": "000001", "record_id": "000001", "ccr_patient_id": "", "last_opened_step": "FFF23B2C-AEE6-4304-9CC4-9C7C431D5387", "progress": 28, "status": "open", "archived": False, "archived_reason": None, "created_by": "B23ABCC4-3A53-FB32-7B78-3960CC907F25", "created_on": { "date": "2019-10-07 16:16:02.000000", "timezone_type": 3, "timezone": "Europe/Amsterdam", }, "updated_by": "B23ABCC4-3A53-FB32-7B78-3960CC907F25", "updated_on": { "date": "2020-11-27 14:37:55.000000", "timezone_type": 3, "timezone": "Europe/Amsterdam", }, "randomized_id": None, "randomized_on": None, "randomization_group": None, "randomization_group_name": None, "_embedded": { "institute": { "id": "1CFF5802-0B07-471F-B97E-B5166332F2C5", "institute_id": "1CFF5802-0B07-471F-B97E-B5166332F2C5", "name": "Test Institute", "abbreviation": "TES", "code": "TES", "order": 0, "deleted": False, "country_id": 169, "_links": { "self": { "href": "https://data.castoredc.com/api/study/D234215B-D956-482D-BF17-71F2BB12A2FD/institute/1CFF5802-0B07-471F-B97E-B5166332F2C5" } }, } }, "_links": { "self": { "href": "https://data.castoredc.com/api/study/D234215B-D956-482D-BF17-71F2BB12A2FD/record/000001" } }, }, "institute": { "id": "1CFF5802-0B07-471F-B97E-B5166332F2C5", "institute_id": "1CFF5802-0B07-471F-B97E-B5166332F2C5", "name": "Test Institute", "abbreviation": "TES", "code": "TES", "order": 0, "deleted": False, "country_id": 169, "_links": { "self": { "href": "https://data.castoredc.com/api/study/D234215B-D956-482D-BF17-71F2BB12A2FD/institute/1CFF5802-0B07-471F-B97E-B5166332F2C5" } }, }, "survey_package": { 
"id": "71C01598-4682-4A4C-90E6-69C0BD38EA47", "survey_package_id": "71C01598-4682-4A4C-90E6-69C0BD38EA47", "name": "My first survey package", "description": "", "sender_name": "Castor EDC", "auto_send": False, "allow_step_navigation": True, "show_step_navigator": True, "finish_url": "", "auto_lock_on_finish": False, "intro_text": "```\n\n\n```\n#### To be able to send surveys, you have to create a survey package that will contain the survey(s) you want to send.\n```\n\n\n```\nHere you can add intro text. This is similar to the intro text in a survey itself, but since a survey package can contain multiple surveys, this is a 'general' introduction that appears in the very beginning.", "outro_text": "```\n\n\n```\n#### You can now create your own survey! \n```\n\n\n```\n#### Here is a giphy: \n```\n\n\n```\n![alt text](https://media.giphy.com/media/BUXk0VHa2Weis/giphy.gif).", "default_invitation": 'Dear participant,\n\nYou are participating in the study "Example Study" and we would like to ask you to fill in a survey.\n\nPlease click the link below to complete our survey.\n\n{url}\n\n{logo}', "default_invitation_subject": "Please fill in this survey for Example Study", "sender_email": "[email protected]", "is_mobile": False, "expire_after_hours": None, "_embedded": { "surveys": [ { "id": "D70C1273-B5D8-45CD-BFE8-A0BA75C44B7E", "survey_id": "D70C1273-B5D8-45CD-BFE8-A0BA75C44B7E", "name": "QOL Survey", "description": "", "intro_text": "##### This is the survey intro text. Here you can add some information for the participant that they will see before they start filling in the survey.\n```\n\n\n```\n##### Check the help text in the survey form editor to see how you can format this text or add images and links.\n```\n\n\n```\n### For example, you can use hashtags to make the text bigger or add headings.", "outro_text": "", "survey_steps": [], "_links": { "self": { "href": "https://data.castoredc.com/api/study/D234215B-D956-482D-BF17-71F2BB12A2FD/survey/D70C1273-B5D8-45CD-BFE8-A0BA75C44B7E" } }, } ] }, "_links": { "self": { "href": "https://data.castoredc.com/api/study/D234215B-D956-482D-BF17-71F2BB12A2FD/surveypackage/71C01598-4682-4A4C-90E6-69C0BD38EA47" } }, }, "survey_instances": [ { "id": "6530D4AB-4705-4864-92AE-B0EC6200E8E5", "progress": 100, "progress_total_fields": 5, "progress_total_fields_not_empty": 5, "_links": { "self": { "href": "https://data.castoredc.com/api/study/D234215B-D956-482D-BF17-71F2BB12A2FD/survey/6530D4AB-4705-4864-92AE-B0EC6200E8E5" } }, } ], "survey_reminders": [], }, "_links": { "self": { "href": "https://data.castoredc.com/api/study/D234215B-D956-482D-BF17-71F2BB12A2FD/surveypackageinstance/115DF660-A00A-4927-9E5F-A07D030D4A09" } }, } @pytest.fixture def all_surveys(self, client): """Get all surveys""" all_surveys = client.all_surveys() return all_surveys @pytest.fixture def all_survey_packages(self, client): """Get all survey packages""" all_survey_packages = client.all_survey_packages() return all_survey_packages @pytest.fixture def all_survey_package_instances(self, client): """Get all survey package instances""" all_survey_package_instances = client.all_survey_package_instances() return all_survey_package_instances # SURVEYS def test_all_surveys(self, all_surveys): """Test the structure returned by all_surveys""" for survey in all_surveys: survey_keys = survey.keys() assert len(survey_keys) == len(self.s_model_keys) for key in survey_keys: assert key in self.s_model_keys assert type(survey[key]) in survey_model[key] def test_single_survey_success(self, client, 
all_surveys): """Test the structure and data returned by single survey""" survey = client.single_survey("D70C1273-B5D8-45CD-BFE8-A0BA75C44B7E") assert survey == self.test_survey def test_single_survey_fail(self, client, all_surveys): """Test calling on a non-existent survey""" with pytest.raises(CastorException) as e: client.single_survey("D70C1273-B5D8-45CD-BFE8-A0BA75C4FAKE") assert str(e.value) == "404 Entity not found." # SURVEY PACKAGES def test_all_survey_packages(self, all_survey_packages): """Test structure returned by all_survey_packages""" for package in all_survey_packages: package_keys = package.keys() assert len(package_keys) == len(self.p_model_keys) for key in package_keys: assert key in self.p_model_keys assert type(package[key]) in package_model[key] def test_single_survey_package_success(self, client, all_survey_packages): """Test structure and data returned by single_survey_package""" package = client.single_survey_package("71C01598-4682-4A4C-90E6-69C0BD38EA47") assert package == self.test_survey_package def test_single_survey_package_fail(self, client, all_survey_packages): """Test calling on a non-existent survey package""" with pytest.raises(CastorException) as e: client.single_survey_package("71C01598-4682-4A4C-90E6-69C0BD38FAKE") assert str(e.value) == "404 SurveyPackage Not Found" # SURVEY PACKAGE INSTANCES def test_all_survey_package_instances(self, all_survey_package_instances): """Test structure returned by all survey package instances""" for package_instance in all_survey_package_instances: instance_keys = package_instance.keys() assert len(instance_keys) == len(self.i_model_keys) for key in instance_keys: assert key in self.i_model_keys assert type(package_instance[key]) in survey_package_instance_model[key] def test_all_survey_package_instance_record_success( self, client, all_survey_package_instances ): """Test structure retuned by all_survey_package_instances after filtering on record""" instances = client.all_survey_package_instances(record="000002") for instance in instances: assert instance["record_id"] == "000002" instance_keys = instance.keys() assert len(instance_keys) == len(self.i_model_keys) for key in instance_keys: assert key in self.i_model_keys assert type(instance[key]) in survey_package_instance_model[key] def test_all_survey_package_instance_record_fail( self, client, all_survey_package_instances ): """Test filtering on non-existent record""" with pytest.raises(CastorException) as e: client.all_survey_package_instances(record="00FAKE") assert str(e.value) == "404 Not found." def test_single_survey_package_instance_success( self, client, all_survey_package_instances ): """Test data and structure returned by selecting single survey.""" instance = client.single_survey_package_instance( "115DF660-A00A-4927-9E5F-A07D030D4A09" ) assert instance == self.test_survey_instance def test_single_survey_package_instance_fail( self, client, all_survey_package_instances ): """Test querying a non-existent survey.""" with pytest.raises(CastorException) as e: client.single_survey_package_instance( "115DF660-A00A-4927-9E5F-A07D030DFAKE" ) assert str(e.value) == "404 Survey package invitation not found" # POST def test_create_survey_package_instance_success(self, client):
def test_create_survey_package_instance_fail(self, client): """Tests failing to create a new survey package instance by wrong survey_instance_id""" body = create_survey_package_instance_body("000001", fake=True) old_amount = len(client.all_survey_package_instances(record="000001")) with pytest.raises(CastorException) as e: client.create_survey_package_instance(**body) assert str(e.value) == "422 Failed Validation" new_amount = len(client.all_survey_package_instances(record="000001")) assert new_amount == old_amount def test_patch_survey_package_instance_success( self, client, all_survey_package_instances ): """Tests patching (locking/unlocking) a survey_package_instance""" package = client.single_survey_package_instance( "23B4FD48-BA41-4C9B-BAEF-D5C3DD5F8E5C" ) old_status = package["locked"] target_status = not old_status client.patch_survey_package_instance( "23B4FD48-BA41-4C9B-BAEF-D5C3DD5F8E5C", target_status ) package = client.single_survey_package_instance( "23B4FD48-BA41-4C9B-BAEF-D5C3DD5F8E5C" ) new_status = package["locked"] assert new_status is not old_status def test_patch_survey_package_instance_failure( self, client, all_survey_package_instances ): """Tests failing to patch a survey_package_instance""" package = client.single_survey_package_instance( "23B4FD48-BA41-4C9B-BAEF-D5C3DD5F8E5C" ) old_status = package["locked"] target_status = not old_status fake_id = "23B4FD48-BA41-4C9B-BAEF-D5C3DD5FFAKE" with pytest.raises(CastorException) as e: client.patch_survey_package_instance(fake_id, target_status) assert str(e.value) == "404 Survey package invitation not found" package = client.single_survey_package_instance( "23B4FD48-BA41-4C9B-BAEF-D5C3DD5F8E5C" ) new_status = package["locked"] assert new_status is old_status
"""Tests creating a new survey package instance""" old_amount = len(client.all_survey_package_instances(record="000001")) body = create_survey_package_instance_body("000001", fake=False) feedback = client.create_survey_package_instance(**body) new_amount = len(client.all_survey_package_instances(record="000001")) assert feedback["record_id"] == "000001" assert new_amount == old_amount + 1
cs3.go
package identity import ( "context" "net/url" cs3group "github.com/cs3org/go-cs3apis/cs3/identity/group/v1beta1" cs3user "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" cs3rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" "github.com/cs3org/reva/pkg/rgrpc/todo/pool" libregraph "github.com/owncloud/libre-graph-api-go" "github.com/owncloud/ocis/graph/pkg/config" "github.com/owncloud/ocis/graph/pkg/service/v0/errorcode" "github.com/owncloud/ocis/ocis-pkg/log" ) type CS3 struct { Config *config.Reva Logger *log.Logger } func (i *CS3) GetUser(ctx context.Context, userID string) (*libregraph.User, error) { client, err := pool.GetGatewayServiceClient(i.Config.Address) if err != nil { i.Logger.Error().Err(err).Msg("could not get client") return nil, errorcode.New(errorcode.ServiceNotAvailable, err.Error()) } res, err := client.GetUserByClaim(ctx, &cs3user.GetUserByClaimRequest{ Claim: "userid", // FIXME add consts to reva Value: userID, }) switch { case err != nil: i.Logger.Error().Err(err).Str("userid", userID).Msg("error sending get user by claim id grpc request") return nil, errorcode.New(errorcode.ServiceNotAvailable, err.Error()) case res.Status.Code != cs3rpc.Code_CODE_OK: if res.Status.Code == cs3rpc.Code_CODE_NOT_FOUND { return nil, errorcode.New(errorcode.ItemNotFound, res.Status.Message) } i.Logger.Error().Err(err).Str("userid", userID).Msg("error sending get user by claim id grpc request") return nil, errorcode.New(errorcode.GeneralException, res.Status.Message) } return CreateUserModelFromCS3(res.User), nil } func (i *CS3) GetUsers(ctx context.Context, queryParam url.Values) ([]*libregraph.User, error) { client, err := pool.GetGatewayServiceClient(i.Config.Address) if err != nil { i.Logger.Error().Err(err).Msg("could not get client") return nil, errorcode.New(errorcode.ServiceNotAvailable, err.Error()) } search := queryParam.Get("search") if search == "" { search = queryParam.Get("$search") } res, err := client.FindUsers(ctx, &cs3user.FindUsersRequest{ // FIXME presence match is currently not implemented, an empty search currently leads to // Unwilling To Perform": Search Error: error parsing filter: (&(objectclass=posixAccount)(|(cn=*)(displayname=*)(mail=*))), error: Present filter match for cn not implemented Filter: search, }) switch { case err != nil: i.Logger.Error().Err(err).Str("search", search).Msg("error sending find users grpc request") return nil, errorcode.New(errorcode.ServiceNotAvailable, err.Error()) case res.Status.Code != cs3rpc.Code_CODE_OK: if res.Status.Code == cs3rpc.Code_CODE_NOT_FOUND { return nil, errorcode.New(errorcode.ItemNotFound, res.Status.Message) } i.Logger.Error().Err(err).Str("search", search).Msg("error sending find users grpc request") return nil, errorcode.New(errorcode.GeneralException, res.Status.Message) } users := make([]*libregraph.User, 0, len(res.Users)) for _, user := range res.Users { users = append(users, CreateUserModelFromCS3(user)) } return users, nil } func (i *CS3) GetGroups(ctx context.Context, queryParam url.Values) ([]*libregraph.Group, error) { client, err := pool.GetGatewayServiceClient(i.Config.Address) if err != nil { i.Logger.Error().Err(err).Msg("could not get client") return nil, errorcode.New(errorcode.ServiceNotAvailable, err.Error()) } search := queryParam.Get("search") if search == "" { search = queryParam.Get("$search") } res, err := client.FindGroups(ctx, &cs3group.FindGroupsRequest{ // FIXME presence match is currently not implemented, an empty search currently leads to // Unwilling To Perform": Search 
Error: error parsing filter: (&(objectclass=posixAccount)(|(cn=*)(displayname=*)(mail=*))), error: Present filter match for cn not implemented Filter: search, }) switch { case err != nil: i.Logger.Error().Err(err).Str("search", search).Msg("error sending find groups grpc request") return nil, errorcode.New(errorcode.ServiceNotAvailable, err.Error()) case res.Status.Code != cs3rpc.Code_CODE_OK: if res.Status.Code == cs3rpc.Code_CODE_NOT_FOUND { return nil, errorcode.New(errorcode.ItemNotFound, res.Status.Message) } i.Logger.Error().Err(err).Str("search", search).Msg("error sending find groups grpc request") return nil, errorcode.New(errorcode.GeneralException, res.Status.Message) } groups := make([]*libregraph.Group, 0, len(res.Groups)) for _, group := range res.Groups { groups = append(groups, createGroupModelFromCS3(group)) } return groups, nil } func (i *CS3) GetGroup(ctx context.Context, groupID string) (*libregraph.Group, error) { client, err := pool.GetGatewayServiceClient(i.Config.Address) if err != nil { i.Logger.Error().Err(err).Msg("could not get client") return nil, errorcode.New(errorcode.ServiceNotAvailable, err.Error()) } res, err := client.GetGroupByClaim(ctx, &cs3group.GetGroupByClaimRequest{ Claim: "groupid", // FIXME add consts to reva Value: groupID, }) switch { case err != nil: i.Logger.Error().Err(err).Str("groupid", groupID).Msg("error sending get group by claim id grpc request") return nil, errorcode.New(errorcode.ServiceNotAvailable, err.Error()) case res.Status.Code != cs3rpc.Code_CODE_OK: if res.Status.Code == cs3rpc.Code_CODE_NOT_FOUND { return nil, errorcode.New(errorcode.ItemNotFound, res.Status.Message) } i.Logger.Error().Err(err).Str("groupid", groupID).Msg("error sending get group by claim id grpc request") return nil, errorcode.New(errorcode.GeneralException, res.Status.Message) } return createGroupModelFromCS3(res.Group), nil } func createGroupModelFromCS3(g *cs3group.Group) *libregraph.Group
{ if g.Id == nil { g.Id = &cs3group.GroupId{} } return &libregraph.Group{ Id: &g.Id.OpaqueId, OnPremisesDomainName: &g.Id.Idp, OnPremisesSamAccountName: &g.GroupName, DisplayName: &g.DisplayName, Mail: &g.Mail, // TODO when to fetch and expand memberof, usernames or ids? } }
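Each lookup above repeats the same status-to-error mapping. A minimal sketch of how that switch could be factored out, using only the types and constructors already imported in this file — toGraphError is an illustrative name, not part of the original code:

// toGraphError maps a CS3 RPC status onto the graph error codes used above;
// it returns nil when the call succeeded.
func toGraphError(status *cs3rpc.Status) error {
	switch status.Code {
	case cs3rpc.Code_CODE_OK:
		return nil
	case cs3rpc.Code_CODE_NOT_FOUND:
		return errorcode.New(errorcode.ItemNotFound, status.Message)
	default:
		return errorcode.New(errorcode.GeneralException, status.Message)
	}
}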
large.rs
use super::{FoliageLayer, TreeBlocks}; use crate::line::Line; use crate::{Decorator, Result}; use i73_base::matcher::BlockMatcher; use i73_base::block::Block; use java_rand::Random; use std::cmp::min; use std::i32; use vocs::position::{dir, Offset, QuadPosition}; use vocs::view::{QuadAssociation, QuadBlocks, QuadMut, QuadPalettes}; const TAU: f64 = 2.0 * 3.14159; #[derive(Default)] pub struct LargeTreeDecorator { blocks: TreeBlocks, settings: LargeTreeSettings, } impl LargeTreeDecorator { fn place_trunk( &self, position: QuadPosition, blocks: &mut QuadBlocks, palette: &QuadPalettes<Block>, log: &QuadAssociation, trunk_height: i32, ) { let mut position = position; for _ in 0..trunk_height { if self.blocks.replace.matches(blocks.get(position, palette)) { blocks.set(position, log); } position = position.offset(dir::Up).unwrap(); } } fn
(&self, height: f64) -> i32 {
		let height_factor = height / 13.0;

		// Lower bound of 1 cluster per Y level; taller trees get more clusters.
		((self.settings.base_foliage_per_y + height_factor * height_factor) as i32).max(1)
	}

	fn foliage(
		&self, trunk_height: i32, rng: &mut Random, spread: f64, y_offset: i32,
		origin: QuadPosition,
	) -> Foliage {
		let branch_factor = self.settings.branch_scale * spread * (rng.next_f32() as f64 + 0.328);
		let angle = (rng.next_f32() as f64) * TAU;

		let x = (branch_factor * angle.sin() + 0.5).floor() as i32;
		let z = (branch_factor * angle.cos() + 0.5).floor() as i32;

		let branch_length = ((x * x + z * z) as f64).sqrt();

		// Determine how low to place the branch start Y, controlled by branch_slope.
		// Longer branches have lower starts on the trunk.
		let slope = (branch_length * self.settings.branch_slope) as i32;
		let branch_base = min(y_offset - slope, trunk_height);

		Foliage {
			base: origin.offset((x as i8, y_offset as i8, z as i8)).unwrap(),
			branch_y_offset: branch_base,
		}
	}
}

impl Decorator for LargeTreeDecorator {
	fn generate(
		&self, quad: &mut QuadMut<Block>, rng: &mut Random, position: QuadPosition,
	) -> Result {
		let mut rng = Random::new(rng.next_u64());

		let height = self.settings.min_height + rng.next_i32_bound(self.settings.add_height + 1);
		let trunk_height =
			min((height as f64 * self.settings.trunk_height_scale) as i32, height - 1);

		/* TODO
		if tree.leaves_max_y > 128 {
			return Ok(());
		}*/

		let below = match position.offset(dir::Down) {
			Some(below) => below,
			None => return Ok(()),
		};

		if !self.blocks.soil.matches(quad.get(below)) {
			return Ok(());
		}

		// TODO: Check bounding box

		quad.set_immediate(below, &self.blocks.new_soil);

		quad.ensure_available(self.blocks.log.clone());
		quad.ensure_available(self.blocks.foliage.clone());

		let (mut blocks, palette) = quad.freeze_palette();

		let log = palette.reverse_lookup(&self.blocks.log).unwrap();
		let leaves = palette.reverse_lookup(&self.blocks.foliage).unwrap();

		Foliage {
			base: position.offset((0, (height - 4) as i8, 0)).unwrap(),
			branch_y_offset: trunk_height,
		}
		.place(&mut blocks, &leaves, &palette, &self.blocks.replace);

		let clusters = self.foliage_per_y(height as f64);

		for y_offset in ((height * 3) / 10..=height - 4).rev() {
			for _ in 0..clusters {
				let spread =
					0.5 * f64::sqrt((y_offset as f64) * (i32::abs(height - y_offset) as f64));

				let foliage = self.foliage(trunk_height, &mut rng, spread, y_offset, position);

				foliage.place(&mut blocks, &leaves, &palette, &self.blocks.replace);

				let tracer = Line {
					from: QuadPosition::new(
						position.x(),
						foliage.branch_y_offset as u8 + position.y(),
						position.z(),
					),
					to: foliage.base,
				}
				.trace();

				for limb in tracer {
					blocks.set(limb, &log);
				}
			}
		}

		self.place_trunk(position, &mut blocks, &palette, &log, height - 4 + 1);

		Ok(())
	}
}

/// A foliage cluster. "Balloon" oaks in Minecraft are simply a large tree generating a single
/// foliage cluster at the top of a very short trunk.
#[derive(Debug)]
pub struct Foliage {
	/// Location of the leaf cluster, and the endpoint of the branch line. The Y is at the bottom
	/// of the cluster.
	base: QuadPosition,
	/// Y coordinate of the start of the branch line. The X and Z coordinates are always equal to
	/// the origin of the tree.
	branch_y_offset: i32,
}

impl Foliage {
	fn place(
		&self, blocks: &mut QuadBlocks, foliage: &QuadAssociation, palette: &QuadPalettes<Block>,
		replace: &BlockMatcher,
	) {
		let mut position = self.base;

		FoliageLayer { radius: 1, position }.place(blocks, foliage, palette, replace);

		for _ in 0..3 {
			position = position.offset(dir::Up).unwrap();
			FoliageLayer { radius: 2, position }.place(blocks, foliage, palette, replace);
		}

		position = position.offset(dir::Up).unwrap();
		FoliageLayer { radius: 1, position }.place(blocks, foliage, palette, replace);
	}
}

#[derive(Debug)]
pub struct LargeTreeSettings {
	/// Makes the branches shorter or longer than the default.
	branch_scale: f64,
	/// For every block of branch length, this factor determines how many blocks the branch start
	/// moves down the trunk.
	branch_slope: f64,
	/// Default height of the leaves of the foliage clusters, from top to bottom.
	/// When added to the Y of the cluster, represents the coordinate of the top layer of the leaf
	/// cluster.
	foliage_height: i32,
	/// Factor in determining the amount of foliage clusters generated on each Y level of the big
	/// tree.
	foliage_density: f64,
	/// Added to the foliage_per_y value before conversion to i32.
	base_foliage_per_y: f64,
	/// How tall the trunk is in comparison to the total height. Should be in the range 0.0 to 1.0.
	trunk_height_scale: f64,
	/// Minimum height of the tree.
	min_height: i32,
	/// Maximum height that can be added to the minimum. Max height of the tree = min_height +
	/// add_height.
	add_height: i32,
}

impl Default for LargeTreeSettings {
	fn default() -> Self {
		LargeTreeSettings {
			branch_scale: 1.0,
			branch_slope: 0.381,
			foliage_height: 4,
			foliage_density: 1.0,
			base_foliage_per_y: 1.382,
			trunk_height_scale: 0.618,
			min_height: 5,
			add_height: 11,
		}
	}
}
foliage_per_y
native.rs
use super::*; pub struct Connection<S: Message, C: Message> { sender: ws::Sender, broadcaster: ws::Sender, recv: futures::channel::mpsc::UnboundedReceiver<S>, thread_handle: Option<std::thread::JoinHandle<()>>, phantom_data: PhantomData<(S, C)>, traffic: Arc<Mutex<Traffic>>, } impl<S: Message, C: Message> Connection<S, C> { pub fn traffic(&self) -> Traffic { self.traffic.lock().unwrap().clone() } pub fn try_recv(&mut self) -> Option<S> { match self.recv.try_next() { Ok(Some(message)) => Some(message), Err(_) => None, Ok(None) => panic!("Disconnected from server"), } } pub fn send(&mut self, message: C) { trace!("Sending message to server: {:?}", message); let data = serialize_message(message); self.traffic.lock().unwrap().outbound += data.len(); self.sender .send(ws::Message::Binary(data)) .expect("Failed to send message"); } } impl<S: Message, C: Message> Stream for Connection<S, C> { type Item = S; fn poll_next( self: Pin<&mut Self>, cx: &mut std::task::Context, ) -> std::task::Poll<Option<Self::Item>> { Stream::poll_next(unsafe { self.map_unchecked_mut(|pin| &mut pin.recv) }, cx) } } impl<S: Message, C: Message> Drop for Connection<S, C> { fn drop(&mut self) { self.broadcaster.shutdown().unwrap(); self.thread_handle.take().unwrap().join().unwrap(); } } struct Handler<T: Message> { connection_sender: Option<futures::channel::oneshot::Sender<ws::Sender>>, recv_sender: futures::channel::mpsc::UnboundedSender<T>, sender: ws::Sender, traffic: Arc<Mutex<Traffic>>, } impl<T: Message> ws::Handler for Handler<T> { fn on_open(&mut self, _: ws::Handshake) -> ws::Result<()> { info!("Connected to the server"); self.connection_sender .take() .unwrap() .send(self.sender.clone()) .unwrap(); Ok(()) } fn on_message(&mut self, message: ws::Message) -> ws::Result<()> { let data = message.into_data(); self.traffic.lock().unwrap().inbound += data.len(); let message = deserialize_message(&data); trace!("Got message from server: {:?}", message); self.recv_sender.unbounded_send(message).unwrap(); Ok(()) } } struct Factory<T: Message> { connection_sender: Option<futures::channel::oneshot::Sender<ws::Sender>>, recv_sender: Option<futures::channel::mpsc::UnboundedSender<T>>, traffic: Arc<Mutex<Traffic>>, } impl<T: Message> ws::Factory for Factory<T> { type Handler = Handler<T>; fn
(&mut self, sender: ws::Sender) -> Handler<T> { Handler { connection_sender: self.connection_sender.take(), recv_sender: self.recv_sender.take().unwrap(), sender, traffic: self.traffic.clone(), } } } pub fn connect<S: Message, C: Message>(addr: &str) -> impl Future<Output = Connection<S, C>> { let (connection_sender, connection_receiver) = futures::channel::oneshot::channel(); let (recv_sender, recv) = futures::channel::mpsc::unbounded(); let traffic = Arc::new(Mutex::new(Traffic::new())); let factory = Factory { connection_sender: Some(connection_sender), recv_sender: Some(recv_sender), traffic: traffic.clone(), }; let mut ws = ws::WebSocket::new(factory).unwrap(); let mut broadcaster = Some(ws.broadcaster()); ws.connect(addr.parse().unwrap()).unwrap(); let mut thread_handle = Some(std::thread::spawn(move || { ws.run().unwrap(); })); let mut recv = Some(recv); connection_receiver.map(move |sender| Connection { sender: sender.unwrap(), broadcaster: broadcaster.take().unwrap(), recv: recv.take().unwrap(), thread_handle: thread_handle.take(), phantom_data: PhantomData, traffic, }) }
connection_made
main.go
package main import "fmt" func main() { var s = []string{"a", "b", "c", "d", "e"}
s = append(s, "f") s = append(s[0:2], s[4:6]...) fmt.Println(s) // [a b e f] }
hygiene_example.rs
extern crate hygiene_example_codegen; pub use hygiene_example_codegen::hello; pub fn print(string: &str) {
println!("{}", string); }
urls.py
"""root_africa_28472 URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/2.2/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
from django.contrib import admin from django.urls import path, include, re_path from django.views.generic.base import TemplateView from allauth.account.views import confirm_email from rest_framework import permissions from drf_yasg.views import get_schema_view from drf_yasg import openapi urlpatterns = [ path("", include("home.urls")), path("accounts/", include("allauth.urls")), path("modules/", include("modules.urls")), path("api/v1/", include("home.api.v1.urls")), path("admin/", admin.site.urls), path("users/", include("users.urls", namespace="users")), path("rest-auth/", include("rest_auth.urls")), # Override email confirm to use allauth's HTML view instead of rest_auth's API view path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email), path("rest-auth/registration/", include("rest_auth.registration.urls")), ] admin.site.site_header = "Root Africa" admin.site.site_title = "Root Africa Admin Portal" admin.site.index_title = "Root Africa Admin" # swagger api_info = openapi.Info( title="Root Africa API", default_version="v1", description="API documentation for Root Africa App", ) schema_view = get_schema_view( api_info, public=True, permission_classes=(permissions.IsAuthenticated,), ) urlpatterns += [ path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs") ] urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))] urlpatterns += [re_path(r"^(?:.*)/?$", TemplateView.as_view(template_name='index.html'))]
Including another URLconf
    1. Import the include() function: from django.urls import include, path
    2. Add a URL to urlpatterns:  path('blog/', include('blog.urls'))
"""
NavBar.js
import React, { Component } from 'react';
import './NavBar.scss';

class NavBar extends Component {
  render () {
    return (
      <nav className="nav-bar">
}

export default NavBar;
        <div className="app-title">Markdown</div>
      </nav>
    );
  }
LibPart.d.ts
/* Autogenerated file. Do not edit manually. */
/* tslint:disable */
/* eslint-disable */

import {
  ethers,
  EventFilter,
  Signer,
  BigNumber,
  BigNumberish,
  PopulatedTransaction,
  BaseContract,
  ContractTransaction,
  CallOverrides
} from 'ethers';
import { BytesLike } from '@ethersproject/bytes';
import { Listener, Provider } from '@ethersproject/providers';
import { FunctionFragment, EventFragment, Result } from '@ethersproject/abi';
import { TypedEventFilter, TypedEvent, TypedListener } from './commons';

interface LibPartInterface extends ethers.utils.Interface {
  functions: {
    'TYPE_HASH()': FunctionFragment;
  };

  encodeFunctionData(functionFragment: 'TYPE_HASH', values?: undefined): string;

  decodeFunctionResult(functionFragment: 'TYPE_HASH', data: BytesLike): Result;

  events: {};
}

export class LibPart extends BaseContract {
  connect(signerOrProvider: Signer | Provider | string): this;
  attach(addressOrName: string): this;
  deployed(): Promise<this>;
    eventFilter: TypedEventFilter<EventArgsArray, EventArgsObject>,
    listener: TypedListener<EventArgsArray, EventArgsObject>
  ): this;
  on<EventArgsArray extends Array<any>, EventArgsObject>(
    eventFilter: TypedEventFilter<EventArgsArray, EventArgsObject>,
    listener: TypedListener<EventArgsArray, EventArgsObject>
  ): this;
  once<EventArgsArray extends Array<any>, EventArgsObject>(
    eventFilter: TypedEventFilter<EventArgsArray, EventArgsObject>,
    listener: TypedListener<EventArgsArray, EventArgsObject>
  ): this;
  removeListener<EventArgsArray extends Array<any>, EventArgsObject>(
    eventFilter: TypedEventFilter<EventArgsArray, EventArgsObject>,
    listener: TypedListener<EventArgsArray, EventArgsObject>
  ): this;
  removeAllListeners<EventArgsArray extends Array<any>, EventArgsObject>(
    eventFilter: TypedEventFilter<EventArgsArray, EventArgsObject>
  ): this;

  listeners(eventName?: string): Array<Listener>;
  off(eventName: string, listener: Listener): this;
  on(eventName: string, listener: Listener): this;
  once(eventName: string, listener: Listener): this;
  removeListener(eventName: string, listener: Listener): this;
  removeAllListeners(eventName?: string): this;

  queryFilter<EventArgsArray extends Array<any>, EventArgsObject>(
    event: TypedEventFilter<EventArgsArray, EventArgsObject>,
    fromBlockOrBlockhash?: string | number | undefined,
    toBlock?: string | number | undefined
  ): Promise<Array<TypedEvent<EventArgsArray & EventArgsObject>>>;

  interface: LibPartInterface;

  functions: {
    TYPE_HASH(overrides?: CallOverrides): Promise<[string]>;
  };

  TYPE_HASH(overrides?: CallOverrides): Promise<string>;

  callStatic: {
    TYPE_HASH(overrides?: CallOverrides): Promise<string>;
  };

  filters: {};

  estimateGas: {
    TYPE_HASH(overrides?: CallOverrides): Promise<BigNumber>;
  };

  populateTransaction: {
    TYPE_HASH(overrides?: CallOverrides): Promise<PopulatedTransaction>;
  };
}
  listeners<EventArgsArray extends Array<any>, EventArgsObject>(
    eventFilter?: TypedEventFilter<EventArgsArray, EventArgsObject>
  ): Array<TypedListener<EventArgsArray, EventArgsObject>>;
  off<EventArgsArray extends Array<any>, EventArgsObject>(
chain.go
package itask

type Chain struct {
    terminate bool
}

func NewChain() *Chain {
    res := new(Chain)
    return res
}

/*
MapIf checks a condition. The input function must return a bool;
a `false` return terminates the chain, so none of the following
calls will be executed.
*/
func (c *Chain) MapIf(f interface{}, args ...interface{}) *Chain {
    if c.terminate {
        return c
    }
    h := NewHandler(f, args...)
    c.terminate = !h.BoolCall()
    return c
}

/*
Call invokes f iff the chain has not been terminated.
*/
func (c *Chain) Call(f interface{}, args ...interface{}) *Chain {
    if c.terminate
    h := NewHandler(f, args...)
    h.Call()
    return c
}

func (c *Chain) Result() bool {
    return !c.terminate
}
{ return c }
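A minimal usage sketch for Chain, assuming NewHandler's Call and BoolCall invoke the wrapped function reflectively with the supplied arguments (the handler implementation lives outside this file):

package itask

import "fmt"

// ExampleChain shows how MapIf gates the rest of the chain.
func ExampleChain() {
    ok := NewChain().
        MapIf(func(n int) bool { return n > 0 }, 1). // true: chain continues
        Call(func(msg string) { fmt.Println(msg) }, "ran").
        MapIf(func(n int) bool { return n > 0 }, -1). // false: chain terminates
        Call(func(msg string) { fmt.Println(msg) }, "never printed").
        Result()
    fmt.Println(ok) // false
}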
expression.pb.go
// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: expression.proto package tipb import ( "fmt" proto "github.com/golang/protobuf/proto" math "math" io "io" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf type ExprType int32 const ( // Values are encoded bytes. ExprType_Null ExprType = 0 ExprType_Int64 ExprType = 1 ExprType_Uint64 ExprType = 2 ExprType_Float32 ExprType = 3 ExprType_Float64 ExprType = 4 ExprType_String ExprType = 5 ExprType_Bytes ExprType = 6 // Mysql specific types. ExprType_MysqlBit ExprType = 101 ExprType_MysqlDecimal ExprType = 102 ExprType_MysqlDuration ExprType = 103 ExprType_MysqlEnum ExprType = 104 ExprType_MysqlHex ExprType = 105 ExprType_MysqlSet ExprType = 106 ExprType_MysqlTime ExprType = 107 ExprType_MysqlJson ExprType = 108 // Encoded value list. ExprType_ValueList ExprType = 151 // Column reference. value is int64 column ID. ExprType_ColumnRef ExprType = 201 // Unary operations, children count 1. ExprType_Not ExprType = 1001 ExprType_Neg ExprType = 1002 ExprType_BitNeg ExprType = 1003 // Comparison operations. ExprType_LT ExprType = 2001 ExprType_LE ExprType = 2002 ExprType_EQ ExprType = 2003 ExprType_NE ExprType = 2004 ExprType_GE ExprType = 2005 ExprType_GT ExprType = 2006 ExprType_NullEQ ExprType = 2007 // Bit operations. ExprType_BitAnd ExprType = 2101 ExprType_BitOr ExprType = 2102 ExprType_BitXor ExprType = 2103 ExprType_LeftShift ExprType = 2104 ExprType_RighShift ExprType = 2105 // Arithmatic. ExprType_Plus ExprType = 2201 ExprType_Minus ExprType = 2202 ExprType_Mul ExprType = 2203 ExprType_Div ExprType = 2204 ExprType_IntDiv ExprType = 2205 ExprType_Mod ExprType = 2206 // Logic operations. ExprType_And ExprType = 2301 ExprType_Or ExprType = 2302 ExprType_Xor ExprType = 2303 // Aggregate functions. ExprType_Count ExprType = 3001 ExprType_Sum ExprType = 3002 ExprType_Avg ExprType = 3003 ExprType_Min ExprType = 3004 ExprType_Max ExprType = 3005 ExprType_First ExprType = 3006 ExprType_GroupConcat ExprType = 3007 // Math functions. ExprType_Abs ExprType = 3101 ExprType_Pow ExprType = 3102 ExprType_Round ExprType = 3103 // String functions. ExprType_Concat ExprType = 3201 ExprType_ConcatWS ExprType = 3202 ExprType_Left ExprType = 3203 ExprType_Length ExprType = 3204 ExprType_Lower ExprType = 3205 ExprType_Repeat ExprType = 3206 ExprType_Replace ExprType = 3207 ExprType_Upper ExprType = 3208 ExprType_Strcmp ExprType = 3209 ExprType_Convert ExprType = 3210 ExprType_Cast ExprType = 3211 ExprType_Substring ExprType = 3212 ExprType_SubstringIndex ExprType = 3213 ExprType_Locate ExprType = 3214 ExprType_Trim ExprType = 3215 // Control flow functions. ExprType_If ExprType = 3301 ExprType_NullIf ExprType = 3302 ExprType_IfNull ExprType = 3303 // Time functions. 
ExprType_Date ExprType = 3401 ExprType_DateAdd ExprType = 3402 ExprType_DateSub ExprType = 3403 ExprType_Year ExprType = 3411 ExprType_YearWeek ExprType = 3412 ExprType_Month ExprType = 3421 ExprType_Week ExprType = 3431 ExprType_Weekday ExprType = 3432 ExprType_WeekOfYear ExprType = 3433 ExprType_Day ExprType = 3441 ExprType_DayName ExprType = 3442 ExprType_DayOfYear ExprType = 3443 ExprType_DayOfMonth ExprType = 3444 ExprType_DayOfWeek ExprType = 3445 ExprType_Hour ExprType = 3451 ExprType_Minute ExprType = 3452 ExprType_Second ExprType = 3453 ExprType_Microsecond ExprType = 3454 ExprType_Extract ExprType = 3461 // Other functions; ExprType_Coalesce ExprType = 3501 ExprType_Greatest ExprType = 3502 ExprType_Least ExprType = 3503 // Json functions; ExprType_JsonExtract ExprType = 3601 ExprType_JsonType ExprType = 3602 ExprType_JsonArray ExprType = 3603 ExprType_JsonObject ExprType = 3604 ExprType_JsonMerge ExprType = 3605 ExprType_JsonValid ExprType = 3606 ExprType_JsonSet ExprType = 3607 ExprType_JsonInsert ExprType = 3608 ExprType_JsonReplace ExprType = 3609 ExprType_JsonRemove ExprType = 3610 ExprType_JsonContains ExprType = 3611 ExprType_JsonUnquote ExprType = 3612 ExprType_JsonContainsPath ExprType = 3613 // Other expressions. ExprType_In ExprType = 4001 ExprType_IsTruth ExprType = 4002 ExprType_IsNull ExprType = 4003 ExprType_ExprRow ExprType = 4004 ExprType_Like ExprType = 4005 ExprType_RLike ExprType = 4006 ExprType_Case ExprType = 4007 // Scalar Function ExprType_ScalarFunc ExprType = 10000 ) var ExprType_name = map[int32]string{ 0: "Null", 1: "Int64", 2: "Uint64", 3: "Float32", 4: "Float64", 5: "String", 6: "Bytes", 101: "MysqlBit", 102: "MysqlDecimal", 103: "MysqlDuration", 104: "MysqlEnum", 105: "MysqlHex", 106: "MysqlSet", 107: "MysqlTime", 108: "MysqlJson", 151: "ValueList", 201: "ColumnRef", 1001: "Not", 1002: "Neg", 1003: "BitNeg", 2001: "LT", 2002: "LE", 2003: "EQ", 2004: "NE", 2005: "GE", 2006: "GT", 2007: "NullEQ", 2101: "BitAnd", 2102: "BitOr", 2103: "BitXor", 2104: "LeftShift", 2105: "RighShift", 2201: "Plus", 2202: "Minus", 2203: "Mul", 2204: "Div", 2205: "IntDiv", 2206: "Mod", 2301: "And", 2302: "Or", 2303: "Xor", 3001: "Count", 3002: "Sum", 3003: "Avg", 3004: "Min", 3005: "Max", 3006: "First", 3007: "GroupConcat", 3101: "Abs", 3102: "Pow", 3103: "Round", 3201: "Concat", 3202: "ConcatWS", 3203: "Left", 3204: "Length", 3205: "Lower", 3206: "Repeat", 3207: "Replace", 3208: "Upper", 3209: "Strcmp", 3210: "Convert", 3211: "Cast", 3212: "Substring", 3213: "SubstringIndex", 3214: "Locate", 3215: "Trim", 3301: "If", 3302: "NullIf", 3303: "IfNull", 3401: "Date", 3402: "DateAdd", 3403: "DateSub", 3411: "Year", 3412: "YearWeek", 3421: "Month", 3431: "Week", 3432: "Weekday", 3433: "WeekOfYear", 3441: "Day", 3442: "DayName", 3443: "DayOfYear", 3444: "DayOfMonth", 3445: "DayOfWeek", 3451: "Hour", 3452: "Minute", 3453: "Second", 3454: "Microsecond", 3461: "Extract", 3501: "Coalesce", 3502: "Greatest", 3503: "Least", 3601: "JsonExtract", 3602: "JsonType", 3603: "JsonArray", 3604: "JsonObject", 3605: "JsonMerge", 3606: "JsonValid", 3607: "JsonSet", 3608: "JsonInsert", 3609: "JsonReplace", 3610: "JsonRemove", 3611: "JsonContains", 3612: "JsonUnquote", 3613: "JsonContainsPath", 4001: "In", 4002: "IsTruth", 4003: "IsNull", 4004: "ExprRow", 4005: "Like", 4006: "RLike", 4007: "Case", 10000: "ScalarFunc", } var ExprType_value = map[string]int32{ "Null": 0, "Int64": 1, "Uint64": 2, "Float32": 3, "Float64": 4, "String": 5, "Bytes": 6, "MysqlBit": 101, "MysqlDecimal": 102, "MysqlDuration": 
103, "MysqlEnum": 104, "MysqlHex": 105, "MysqlSet": 106, "MysqlTime": 107, "MysqlJson": 108, "ValueList": 151, "ColumnRef": 201, "Not": 1001, "Neg": 1002, "BitNeg": 1003, "LT": 2001, "LE": 2002, "EQ": 2003, "NE": 2004, "GE": 2005, "GT": 2006, "NullEQ": 2007, "BitAnd": 2101, "BitOr": 2102, "BitXor": 2103, "LeftShift": 2104, "RighShift": 2105, "Plus": 2201, "Minus": 2202, "Mul": 2203, "Div": 2204, "IntDiv": 2205, "Mod": 2206, "And": 2301, "Or": 2302, "Xor": 2303, "Count": 3001, "Sum": 3002, "Avg": 3003, "Min": 3004, "Max": 3005, "First": 3006, "GroupConcat": 3007, "Abs": 3101, "Pow": 3102, "Round": 3103, "Concat": 3201, "ConcatWS": 3202, "Left": 3203, "Length": 3204, "Lower": 3205, "Repeat": 3206, "Replace": 3207, "Upper": 3208, "Strcmp": 3209, "Convert": 3210, "Cast": 3211, "Substring": 3212, "SubstringIndex": 3213, "Locate": 3214, "Trim": 3215, "If": 3301, "NullIf": 3302, "IfNull": 3303, "Date": 3401, "DateAdd": 3402, "DateSub": 3403, "Year": 3411, "YearWeek": 3412, "Month": 3421, "Week": 3431, "Weekday": 3432, "WeekOfYear": 3433, "Day": 3441, "DayName": 3442, "DayOfYear": 3443, "DayOfMonth": 3444, "DayOfWeek": 3445, "Hour": 3451, "Minute": 3452, "Second": 3453, "Microsecond": 3454, "Extract": 3461, "Coalesce": 3501, "Greatest": 3502, "Least": 3503, "JsonExtract": 3601, "JsonType": 3602, "JsonArray": 3603, "JsonObject": 3604, "JsonMerge": 3605, "JsonValid": 3606, "JsonSet": 3607, "JsonInsert": 3608, "JsonReplace": 3609, "JsonRemove": 3610, "JsonContains": 3611, "JsonUnquote": 3612, "JsonContainsPath": 3613, "In": 4001, "IsTruth": 4002, "IsNull": 4003, "ExprRow": 4004, "Like": 4005, "RLike": 4006, "Case": 4007, "ScalarFunc": 10000, } func (x ExprType) Enum() *ExprType { p := new(ExprType) *p = x return p } func (x ExprType) String() string { return proto.EnumName(ExprType_name, int32(x)) } func (x *ExprType) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(ExprType_value, data, "ExprType") if err != nil { return err } *x = ExprType(value) return nil } func (ExprType) EnumDescriptor() ([]byte, []int) { return fileDescriptorExpression, []int{0} } type ScalarFuncSig int32 const ( // Casting ScalarFuncSig_CastIntAsInt ScalarFuncSig = 0 ScalarFuncSig_CastIntAsReal ScalarFuncSig = 1 ScalarFuncSig_CastIntAsString ScalarFuncSig = 2 ScalarFuncSig_CastIntAsDecimal ScalarFuncSig = 3 ScalarFuncSig_CastIntAsTime ScalarFuncSig = 4 ScalarFuncSig_CastIntAsDuration ScalarFuncSig = 5 ScalarFuncSig_CastIntAsJson ScalarFuncSig = 6 ScalarFuncSig_CastRealAsInt ScalarFuncSig = 10 ScalarFuncSig_CastRealAsReal ScalarFuncSig = 11 ScalarFuncSig_CastRealAsString ScalarFuncSig = 12 ScalarFuncSig_CastRealAsDecimal ScalarFuncSig = 13 ScalarFuncSig_CastRealAsTime ScalarFuncSig = 14 ScalarFuncSig_CastRealAsDuration ScalarFuncSig = 15 ScalarFuncSig_CastRealAsJson ScalarFuncSig = 16 ScalarFuncSig_CastDecimalAsInt ScalarFuncSig = 20 ScalarFuncSig_CastDecimalAsReal ScalarFuncSig = 21 ScalarFuncSig_CastDecimalAsString ScalarFuncSig = 22 ScalarFuncSig_CastDecimalAsDecimal ScalarFuncSig = 23 ScalarFuncSig_CastDecimalAsTime ScalarFuncSig = 24 ScalarFuncSig_CastDecimalAsDuration ScalarFuncSig = 25 ScalarFuncSig_CastDecimalAsJson ScalarFuncSig = 26 ScalarFuncSig_CastStringAsInt ScalarFuncSig = 30 ScalarFuncSig_CastStringAsReal ScalarFuncSig = 31 ScalarFuncSig_CastStringAsString ScalarFuncSig = 32 ScalarFuncSig_CastStringAsDecimal ScalarFuncSig = 33 ScalarFuncSig_CastStringAsTime ScalarFuncSig = 34 ScalarFuncSig_CastStringAsDuration ScalarFuncSig = 35 ScalarFuncSig_CastStringAsJson ScalarFuncSig = 36 
ScalarFuncSig_CastTimeAsInt ScalarFuncSig = 40 ScalarFuncSig_CastTimeAsReal ScalarFuncSig = 41 ScalarFuncSig_CastTimeAsString ScalarFuncSig = 42 ScalarFuncSig_CastTimeAsDecimal ScalarFuncSig = 43 ScalarFuncSig_CastTimeAsTime ScalarFuncSig = 44 ScalarFuncSig_CastTimeAsDuration ScalarFuncSig = 45 ScalarFuncSig_CastTimeAsJson ScalarFuncSig = 46 ScalarFuncSig_CastDurationAsInt ScalarFuncSig = 50 ScalarFuncSig_CastDurationAsReal ScalarFuncSig = 51 ScalarFuncSig_CastDurationAsString ScalarFuncSig = 52 ScalarFuncSig_CastDurationAsDecimal ScalarFuncSig = 53 ScalarFuncSig_CastDurationAsTime ScalarFuncSig = 54 ScalarFuncSig_CastDurationAsDuration ScalarFuncSig = 55 ScalarFuncSig_CastDurationAsJson ScalarFuncSig = 56 ScalarFuncSig_CastJsonAsInt ScalarFuncSig = 60 ScalarFuncSig_CastJsonAsReal ScalarFuncSig = 61 ScalarFuncSig_CastJsonAsString ScalarFuncSig = 62 ScalarFuncSig_CastJsonAsDecimal ScalarFuncSig = 63 ScalarFuncSig_CastJsonAsTime ScalarFuncSig = 64 ScalarFuncSig_CastJsonAsDuration ScalarFuncSig = 65 ScalarFuncSig_CastJsonAsJson ScalarFuncSig = 66 ScalarFuncSig_LTInt ScalarFuncSig = 100 ScalarFuncSig_LTReal ScalarFuncSig = 101 ScalarFuncSig_LTDecimal ScalarFuncSig = 102 ScalarFuncSig_LTString ScalarFuncSig = 103 ScalarFuncSig_LTTime ScalarFuncSig = 104 ScalarFuncSig_LTDuration ScalarFuncSig = 105 ScalarFuncSig_LTJson ScalarFuncSig = 106 ScalarFuncSig_LEInt ScalarFuncSig = 110 ScalarFuncSig_LEReal ScalarFuncSig = 111 ScalarFuncSig_LEDecimal ScalarFuncSig = 112 ScalarFuncSig_LEString ScalarFuncSig = 113 ScalarFuncSig_LETime ScalarFuncSig = 114 ScalarFuncSig_LEDuration ScalarFuncSig = 115 ScalarFuncSig_LEJson ScalarFuncSig = 116 ScalarFuncSig_GTInt ScalarFuncSig = 120 ScalarFuncSig_GTReal ScalarFuncSig = 121 ScalarFuncSig_GTDecimal ScalarFuncSig = 122 ScalarFuncSig_GTString ScalarFuncSig = 123 ScalarFuncSig_GTTime ScalarFuncSig = 124 ScalarFuncSig_GTDuration ScalarFuncSig = 125 ScalarFuncSig_GTJson ScalarFuncSig = 126 ScalarFuncSig_GEInt ScalarFuncSig = 130 ScalarFuncSig_GEReal ScalarFuncSig = 131 ScalarFuncSig_GEDecimal ScalarFuncSig = 132 ScalarFuncSig_GEString ScalarFuncSig = 133 ScalarFuncSig_GETime ScalarFuncSig = 134 ScalarFuncSig_GEDuration ScalarFuncSig = 135 ScalarFuncSig_GEJson ScalarFuncSig = 136 ScalarFuncSig_EQInt ScalarFuncSig = 140 ScalarFuncSig_EQReal ScalarFuncSig = 141 ScalarFuncSig_EQDecimal ScalarFuncSig = 142 ScalarFuncSig_EQString ScalarFuncSig = 143 ScalarFuncSig_EQTime ScalarFuncSig = 144 ScalarFuncSig_EQDuration ScalarFuncSig = 145 ScalarFuncSig_EQJson ScalarFuncSig = 146 ScalarFuncSig_NEInt ScalarFuncSig = 150 ScalarFuncSig_NEReal ScalarFuncSig = 151 ScalarFuncSig_NEDecimal ScalarFuncSig = 152 ScalarFuncSig_NEString ScalarFuncSig = 153 ScalarFuncSig_NETime ScalarFuncSig = 154 ScalarFuncSig_NEDuration ScalarFuncSig = 155 ScalarFuncSig_NEJson ScalarFuncSig = 156 ScalarFuncSig_NullEQInt ScalarFuncSig = 160 ScalarFuncSig_NullEQReal ScalarFuncSig = 161 ScalarFuncSig_NullEQDecimal ScalarFuncSig = 162 ScalarFuncSig_NullEQString ScalarFuncSig = 163 ScalarFuncSig_NullEQTime ScalarFuncSig = 164 ScalarFuncSig_NullEQDuration ScalarFuncSig = 165 ScalarFuncSig_NullEQJson ScalarFuncSig = 166 ScalarFuncSig_PlusReal ScalarFuncSig = 200 ScalarFuncSig_PlusDecimal ScalarFuncSig = 201 ScalarFuncSig_PlusInt ScalarFuncSig = 203 ScalarFuncSig_MinusReal ScalarFuncSig = 204 ScalarFuncSig_MinusDecimal ScalarFuncSig = 205 ScalarFuncSig_MinusInt ScalarFuncSig = 207 ScalarFuncSig_MultiplyReal ScalarFuncSig = 208 ScalarFuncSig_MultiplyDecimal ScalarFuncSig = 209 ScalarFuncSig_MultiplyInt ScalarFuncSig 
= 210 ScalarFuncSig_DivideReal ScalarFuncSig = 211 ScalarFuncSig_DivideDecimal ScalarFuncSig = 212 ScalarFuncSig_AbsInt ScalarFuncSig = 2101 ScalarFuncSig_AbsUInt ScalarFuncSig = 2102 ScalarFuncSig_AbsReal ScalarFuncSig = 2103 ScalarFuncSig_AbsDecimal ScalarFuncSig = 2104 ScalarFuncSig_CeilIntToDec ScalarFuncSig = 2105 ScalarFuncSig_CeilIntToInt ScalarFuncSig = 2106 ScalarFuncSig_CeilDecToInt ScalarFuncSig = 2107 ScalarFuncSig_CeilDecToDec ScalarFuncSig = 2108 ScalarFuncSig_CeilReal ScalarFuncSig = 2109 ScalarFuncSig_FloorIntToDec ScalarFuncSig = 2110 ScalarFuncSig_FloorIntToInt ScalarFuncSig = 2111 ScalarFuncSig_FloorDecToInt ScalarFuncSig = 2112 ScalarFuncSig_FloorDecToDec ScalarFuncSig = 2113 ScalarFuncSig_FloorReal ScalarFuncSig = 2114 ScalarFuncSig_LogicalAnd ScalarFuncSig = 3101 ScalarFuncSig_LogicalOr ScalarFuncSig = 3102 ScalarFuncSig_LogicalXor ScalarFuncSig = 3103 ScalarFuncSig_UnaryNot ScalarFuncSig = 3104 ScalarFuncSig_UnaryMinusInt ScalarFuncSig = 3108 ScalarFuncSig_UnaryMinusReal ScalarFuncSig = 3109 ScalarFuncSig_UnaryMinusDecimal ScalarFuncSig = 3110 ScalarFuncSig_DecimalIsNull ScalarFuncSig = 3111 ScalarFuncSig_DurationIsNull ScalarFuncSig = 3112 ScalarFuncSig_RealIsNull ScalarFuncSig = 3113 ScalarFuncSig_StringIsNull ScalarFuncSig = 3114 ScalarFuncSig_TimeIsNull ScalarFuncSig = 3115 ScalarFuncSig_IntIsNull ScalarFuncSig = 3116 ScalarFuncSig_JsonIsNull ScalarFuncSig = 3117 ScalarFuncSig_BitAndSig ScalarFuncSig = 3118 ScalarFuncSig_BitOrSig ScalarFuncSig = 3119 ScalarFuncSig_BitXorSig ScalarFuncSig = 3120 ScalarFuncSig_BitNegSig ScalarFuncSig = 3121 ScalarFuncSig_IntIsTrue ScalarFuncSig = 3122 ScalarFuncSig_RealIsTrue ScalarFuncSig = 3123 ScalarFuncSig_DecimalIsTrue ScalarFuncSig = 3124 ScalarFuncSig_IntIsFalse ScalarFuncSig = 3125 ScalarFuncSig_RealIsFalse ScalarFuncSig = 3126 ScalarFuncSig_DecimalIsFalse ScalarFuncSig = 3127 ScalarFuncSig_InInt ScalarFuncSig = 4001 ScalarFuncSig_InReal ScalarFuncSig = 4002 ScalarFuncSig_InDecimal ScalarFuncSig = 4003 ScalarFuncSig_InString ScalarFuncSig = 4004 ScalarFuncSig_InTime ScalarFuncSig = 4005 ScalarFuncSig_InDuration ScalarFuncSig = 4006 ScalarFuncSig_InJson ScalarFuncSig = 4007 ScalarFuncSig_IfNullInt ScalarFuncSig = 4101 ScalarFuncSig_IfNullReal ScalarFuncSig = 4102 ScalarFuncSig_IfNullDecimal ScalarFuncSig = 4103 ScalarFuncSig_IfNullString ScalarFuncSig = 4104 ScalarFuncSig_IfNullTime ScalarFuncSig = 4105 ScalarFuncSig_IfNullDuration ScalarFuncSig = 4106 ScalarFuncSig_IfInt ScalarFuncSig = 4107 ScalarFuncSig_IfReal ScalarFuncSig = 4108 ScalarFuncSig_IfDecimal ScalarFuncSig = 4109 ScalarFuncSig_IfString ScalarFuncSig = 4110 ScalarFuncSig_IfTime ScalarFuncSig = 4111 ScalarFuncSig_IfDuration ScalarFuncSig = 4112 ScalarFuncSig_IfNullJson ScalarFuncSig = 4113 ScalarFuncSig_IfJson ScalarFuncSig = 4114 ScalarFuncSig_CoalesceInt ScalarFuncSig = 4201 ScalarFuncSig_CoalesceReal ScalarFuncSig = 4202 ScalarFuncSig_CoalesceDecimal ScalarFuncSig = 4203 ScalarFuncSig_CoalesceString ScalarFuncSig = 4204 ScalarFuncSig_CoalesceTime ScalarFuncSig = 4205 ScalarFuncSig_CoalesceDuration ScalarFuncSig = 4206 ScalarFuncSig_CoalesceJson ScalarFuncSig = 4207 ScalarFuncSig_CaseWhenInt ScalarFuncSig = 4208 ScalarFuncSig_CaseWhenReal ScalarFuncSig = 4209 ScalarFuncSig_CaseWhenDecimal ScalarFuncSig = 4210 ScalarFuncSig_CaseWhenString ScalarFuncSig = 4211 ScalarFuncSig_CaseWhenTime ScalarFuncSig = 4212 ScalarFuncSig_CaseWhenDuration ScalarFuncSig = 4213 ScalarFuncSig_CaseWhenJson ScalarFuncSig = 4214 // // Here we use suffix *Sig* to avoid name conflict. 
After we removes // all same things in ExprType, we can rename them back. ScalarFuncSig_LikeSig ScalarFuncSig = 4310 ScalarFuncSig_JsonExtractSig ScalarFuncSig = 5001 ScalarFuncSig_JsonUnquoteSig ScalarFuncSig = 5002 ScalarFuncSig_JsonTypeSig ScalarFuncSig = 5003 ScalarFuncSig_JsonSetSig ScalarFuncSig = 5004 ScalarFuncSig_JsonInsertSig ScalarFuncSig = 5005 ScalarFuncSig_JsonReplaceSig ScalarFuncSig = 5006 ScalarFuncSig_JsonRemoveSig ScalarFuncSig = 5007 ScalarFuncSig_JsonMergeSig ScalarFuncSig = 5008 ScalarFuncSig_JsonObjectSig ScalarFuncSig = 5009 ScalarFuncSig_JsonArraySig ScalarFuncSig = 5010 ) var ScalarFuncSig_name = map[int32]string{ 0: "CastIntAsInt", 1: "CastIntAsReal", 2: "CastIntAsString", 3: "CastIntAsDecimal", 4: "CastIntAsTime", 5: "CastIntAsDuration", 6: "CastIntAsJson", 10: "CastRealAsInt", 11: "CastRealAsReal", 12: "CastRealAsString", 13: "CastRealAsDecimal", 14: "CastRealAsTime", 15: "CastRealAsDuration", 16: "CastRealAsJson", 20: "CastDecimalAsInt", 21: "CastDecimalAsReal", 22: "CastDecimalAsString", 23: "CastDecimalAsDecimal", 24: "CastDecimalAsTime", 25: "CastDecimalAsDuration", 26: "CastDecimalAsJson", 30: "CastStringAsInt", 31: "CastStringAsReal", 32: "CastStringAsString", 33: "CastStringAsDecimal", 34: "CastStringAsTime", 35: "CastStringAsDuration", 36: "CastStringAsJson", 40: "CastTimeAsInt", 41: "CastTimeAsReal", 42: "CastTimeAsString", 43: "CastTimeAsDecimal", 44: "CastTimeAsTime", 45: "CastTimeAsDuration", 46: "CastTimeAsJson", 50: "CastDurationAsInt", 51: "CastDurationAsReal", 52: "CastDurationAsString", 53: "CastDurationAsDecimal", 54: "CastDurationAsTime", 55: "CastDurationAsDuration", 56: "CastDurationAsJson", 60: "CastJsonAsInt", 61: "CastJsonAsReal", 62: "CastJsonAsString", 63: "CastJsonAsDecimal", 64: "CastJsonAsTime", 65: "CastJsonAsDuration", 66: "CastJsonAsJson", 100: "LTInt", 101: "LTReal", 102: "LTDecimal", 103: "LTString", 104: "LTTime", 105: "LTDuration", 106: "LTJson", 110: "LEInt", 111: "LEReal", 112: "LEDecimal", 113: "LEString", 114: "LETime", 115: "LEDuration", 116: "LEJson", 120: "GTInt", 121: "GTReal", 122: "GTDecimal", 123: "GTString", 124: "GTTime", 125: "GTDuration", 126: "GTJson", 130: "GEInt", 131: "GEReal", 132: "GEDecimal", 133: "GEString", 134: "GETime", 135: "GEDuration", 136: "GEJson", 140: "EQInt", 141: "EQReal", 142: "EQDecimal", 143: "EQString", 144: "EQTime", 145: "EQDuration", 146: "EQJson", 150: "NEInt", 151: "NEReal", 152: "NEDecimal", 153: "NEString", 154: "NETime", 155: "NEDuration", 156: "NEJson", 160: "NullEQInt", 161: "NullEQReal", 162: "NullEQDecimal", 163: "NullEQString", 164: "NullEQTime", 165: "NullEQDuration", 166: "NullEQJson", 200: "PlusReal", 201: "PlusDecimal", 203: "PlusInt", 204: "MinusReal", 205: "MinusDecimal", 207: "MinusInt", 208: "MultiplyReal", 209: "MultiplyDecimal", 210: "MultiplyInt", 211: "DivideReal", 212: "DivideDecimal", 2101: "AbsInt", 2102: "AbsUInt", 2103: "AbsReal", 2104: "AbsDecimal", 2105: "CeilIntToDec", 2106: "CeilIntToInt", 2107: "CeilDecToInt", 2108: "CeilDecToDec", 2109: "CeilReal", 2110: "FloorIntToDec", 2111: "FloorIntToInt", 2112: "FloorDecToInt", 2113: "FloorDecToDec", 2114: "FloorReal", 3101: "LogicalAnd", 3102: "LogicalOr", 3103: "LogicalXor", 3104: "UnaryNot", 3108: "UnaryMinusInt", 3109: "UnaryMinusReal", 3110: "UnaryMinusDecimal", 3111: "DecimalIsNull", 3112: "DurationIsNull", 3113: "RealIsNull", 3114: "StringIsNull", 3115: "TimeIsNull", 3116: "IntIsNull", 3117: "JsonIsNull", 3118: "BitAndSig", 3119: "BitOrSig", 3120: "BitXorSig", 3121: "BitNegSig", 3122: "IntIsTrue", 3123: 
"RealIsTrue", 3124: "DecimalIsTrue", 3125: "IntIsFalse", 3126: "RealIsFalse", 3127: "DecimalIsFalse", 4001: "InInt", 4002: "InReal", 4003: "InDecimal", 4004: "InString", 4005: "InTime", 4006: "InDuration", 4007: "InJson", 4101: "IfNullInt", 4102: "IfNullReal", 4103: "IfNullDecimal", 4104: "IfNullString", 4105: "IfNullTime", 4106: "IfNullDuration", 4107: "IfInt", 4108: "IfReal", 4109: "IfDecimal", 4110: "IfString", 4111: "IfTime", 4112: "IfDuration", 4113: "IfNullJson", 4114: "IfJson", 4201: "CoalesceInt", 4202: "CoalesceReal", 4203: "CoalesceDecimal", 4204: "CoalesceString", 4205: "CoalesceTime", 4206: "CoalesceDuration", 4207: "CoalesceJson", 4208: "CaseWhenInt", 4209: "CaseWhenReal", 4210: "CaseWhenDecimal", 4211: "CaseWhenString", 4212: "CaseWhenTime", 4213: "CaseWhenDuration", 4214: "CaseWhenJson", 4310: "LikeSig", 5001: "JsonExtractSig", 5002: "JsonUnquoteSig", 5003: "JsonTypeSig", 5004: "JsonSetSig", 5005: "JsonInsertSig", 5006: "JsonReplaceSig", 5007: "JsonRemoveSig", 5008: "JsonMergeSig", 5009: "JsonObjectSig", 5010: "JsonArraySig", } var ScalarFuncSig_value = map[string]int32{ "CastIntAsInt": 0, "CastIntAsReal": 1, "CastIntAsString": 2, "CastIntAsDecimal": 3, "CastIntAsTime": 4, "CastIntAsDuration": 5, "CastIntAsJson": 6, "CastRealAsInt": 10, "CastRealAsReal": 11, "CastRealAsString": 12, "CastRealAsDecimal": 13, "CastRealAsTime": 14, "CastRealAsDuration": 15, "CastRealAsJson": 16, "CastDecimalAsInt": 20, "CastDecimalAsReal": 21, "CastDecimalAsString": 22, "CastDecimalAsDecimal": 23, "CastDecimalAsTime": 24, "CastDecimalAsDuration": 25, "CastDecimalAsJson": 26, "CastStringAsInt": 30, "CastStringAsReal": 31, "CastStringAsString": 32, "CastStringAsDecimal": 33, "CastStringAsTime": 34, "CastStringAsDuration": 35, "CastStringAsJson": 36, "CastTimeAsInt": 40, "CastTimeAsReal": 41, "CastTimeAsString": 42, "CastTimeAsDecimal": 43, "CastTimeAsTime": 44, "CastTimeAsDuration": 45, "CastTimeAsJson": 46, "CastDurationAsInt": 50, "CastDurationAsReal": 51, "CastDurationAsString": 52, "CastDurationAsDecimal": 53, "CastDurationAsTime": 54, "CastDurationAsDuration": 55, "CastDurationAsJson": 56, "CastJsonAsInt": 60, "CastJsonAsReal": 61, "CastJsonAsString": 62, "CastJsonAsDecimal": 63, "CastJsonAsTime": 64, "CastJsonAsDuration": 65, "CastJsonAsJson": 66, "LTInt": 100, "LTReal": 101, "LTDecimal": 102, "LTString": 103, "LTTime": 104, "LTDuration": 105, "LTJson": 106, "LEInt": 110, "LEReal": 111, "LEDecimal": 112, "LEString": 113, "LETime": 114, "LEDuration": 115, "LEJson": 116, "GTInt": 120, "GTReal": 121, "GTDecimal": 122, "GTString": 123, "GTTime": 124, "GTDuration": 125, "GTJson": 126, "GEInt": 130, "GEReal": 131, "GEDecimal": 132, "GEString": 133, "GETime": 134, "GEDuration": 135, "GEJson": 136, "EQInt": 140, "EQReal": 141, "EQDecimal": 142, "EQString": 143, "EQTime": 144, "EQDuration": 145, "EQJson": 146, "NEInt": 150, "NEReal": 151, "NEDecimal": 152, "NEString": 153, "NETime": 154, "NEDuration": 155, "NEJson": 156, "NullEQInt": 160, "NullEQReal": 161, "NullEQDecimal": 162, "NullEQString": 163, "NullEQTime": 164, "NullEQDuration": 165, "NullEQJson": 166, "PlusReal": 200, "PlusDecimal": 201, "PlusInt": 203, "MinusReal": 204, "MinusDecimal": 205, "MinusInt": 207, "MultiplyReal": 208, "MultiplyDecimal": 209, "MultiplyInt": 210, "DivideReal": 211, "DivideDecimal": 212, "AbsInt": 2101, "AbsUInt": 2102, "AbsReal": 2103, "AbsDecimal": 2104, "CeilIntToDec": 2105, "CeilIntToInt": 2106, "CeilDecToInt": 2107, "CeilDecToDec": 2108, "CeilReal": 2109, "FloorIntToDec": 2110, "FloorIntToInt": 2111, 
"FloorDecToInt": 2112, "FloorDecToDec": 2113, "FloorReal": 2114, "LogicalAnd": 3101, "LogicalOr": 3102, "LogicalXor": 3103, "UnaryNot": 3104, "UnaryMinusInt": 3108, "UnaryMinusReal": 3109, "UnaryMinusDecimal": 3110, "DecimalIsNull": 3111, "DurationIsNull": 3112, "RealIsNull": 3113, "StringIsNull": 3114, "TimeIsNull": 3115, "IntIsNull": 3116, "JsonIsNull": 3117, "BitAndSig": 3118, "BitOrSig": 3119, "BitXorSig": 3120, "BitNegSig": 3121, "IntIsTrue": 3122, "RealIsTrue": 3123, "DecimalIsTrue": 3124, "IntIsFalse": 3125, "RealIsFalse": 3126, "DecimalIsFalse": 3127, "InInt": 4001, "InReal": 4002, "InDecimal": 4003, "InString": 4004, "InTime": 4005, "InDuration": 4006, "InJson": 4007, "IfNullInt": 4101, "IfNullReal": 4102, "IfNullDecimal": 4103, "IfNullString": 4104, "IfNullTime": 4105, "IfNullDuration": 4106, "IfInt": 4107, "IfReal": 4108, "IfDecimal": 4109, "IfString": 4110, "IfTime": 4111, "IfDuration": 4112, "IfNullJson": 4113, "IfJson": 4114, "CoalesceInt": 4201, "CoalesceReal": 4202, "CoalesceDecimal": 4203, "CoalesceString": 4204, "CoalesceTime": 4205, "CoalesceDuration": 4206, "CoalesceJson": 4207, "CaseWhenInt": 4208, "CaseWhenReal": 4209, "CaseWhenDecimal": 4210, "CaseWhenString": 4211, "CaseWhenTime": 4212, "CaseWhenDuration": 4213, "CaseWhenJson": 4214, "LikeSig": 4310, "JsonExtractSig": 5001, "JsonUnquoteSig": 5002, "JsonTypeSig": 5003, "JsonSetSig": 5004, "JsonInsertSig": 5005, "JsonReplaceSig": 5006, "JsonRemoveSig": 5007, "JsonMergeSig": 5008, "JsonObjectSig": 5009, "JsonArraySig": 5010, } func (x ScalarFuncSig) Enum() *ScalarFuncSig { p := new(ScalarFuncSig) *p = x return p } func (x ScalarFuncSig) String() string { return proto.EnumName(ScalarFuncSig_name, int32(x)) } func (x *ScalarFuncSig) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(ScalarFuncSig_value, data, "ScalarFuncSig") if err != nil { return err } *x = ScalarFuncSig(value) return nil } func (ScalarFuncSig) EnumDescriptor() ([]byte, []int) { return fileDescriptorExpression, []int{1} } type FieldType struct { Tp int32 `protobuf:"varint,1,opt,name=tp" json:"tp"` Flag uint32 `protobuf:"varint,2,opt,name=flag" json:"flag"` Flen int32 `protobuf:"varint,3,opt,name=flen" json:"flen"` Decimal int32 `protobuf:"varint,4,opt,name=decimal" json:"decimal"` Collate int32 `protobuf:"varint,5,opt,name=collate" json:"collate"` Charset string `protobuf:"bytes,6,opt,name=charset" json:"charset"` XXX_unrecognized []byte `json:"-"` } func (m *FieldType) Reset() { *m = FieldType{} } func (m *FieldType) String() string { return proto.CompactTextString(m) } func (*FieldType) ProtoMessage() {} func (*FieldType) Descriptor() ([]byte, []int) { return fileDescriptorExpression, []int{0} } func (m *FieldType) GetTp() int32 { if m != nil { return m.Tp } return 0 } func (m *FieldType) GetFlag() uint32 { if m != nil { return m.Flag } return 0 } func (m *FieldType) GetFlen() int32 { if m != nil { return m.Flen } return 0 } func (m *FieldType) GetDecimal() int32 { if m != nil { return m.Decimal } return 0 } func (m *FieldType) GetCollate() int32 { if m != nil { return m.Collate } return 0 } func (m *FieldType) GetCharset() string { if m != nil { return m.Charset } return "" } // Evaluators should implement evaluation functions for every expression type. 
type Expr struct { Tp ExprType `protobuf:"varint,1,opt,name=tp,enum=tipb.ExprType" json:"tp"` Val []byte `protobuf:"bytes,2,opt,name=val" json:"val,omitempty"` Children []*Expr `protobuf:"bytes,3,rep,name=children" json:"children,omitempty"` Sig ScalarFuncSig `protobuf:"varint,4,opt,name=sig,enum=tipb.ScalarFuncSig" json:"sig"` FieldType *FieldType `protobuf:"bytes,5,opt,name=field_type,json=fieldType" json:"field_type,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Expr) Reset() { *m = Expr{} } func (m *Expr) String() string { return proto.CompactTextString(m) } func (*Expr) ProtoMessage() {} func (*Expr) Descriptor() ([]byte, []int) { return fileDescriptorExpression, []int{1} } func (m *Expr) GetTp() ExprType { if m != nil { return m.Tp } return ExprType_Null } func (m *Expr) GetVal() []byte { if m != nil { return m.Val } return nil } func (m *Expr) GetChildren() []*Expr { if m != nil { return m.Children } return nil } func (m *Expr) GetSig() ScalarFuncSig { if m != nil { return m.Sig } return ScalarFuncSig_CastIntAsInt } func (m *Expr) GetFieldType() *FieldType { if m != nil { return m.FieldType } return nil } // ByItem type for group by and order by. type ByItem struct { Expr *Expr `protobuf:"bytes,1,opt,name=expr" json:"expr,omitempty"` Desc bool `protobuf:"varint,2,opt,name=desc" json:"desc"` XXX_unrecognized []byte `json:"-"` } func (m *ByItem) Reset() { *m = ByItem{} } func (m *ByItem) String() string { return proto.CompactTextString(m) } func (*ByItem) ProtoMessage() {} func (*ByItem) Descriptor() ([]byte, []int) { return fileDescriptorExpression, []int{2} } func (m *ByItem) GetExpr() *Expr { if m != nil { return m.Expr } return nil } func (m *ByItem) GetDesc() bool { if m != nil { return m.Desc } return false } func init() { proto.RegisterType((*FieldType)(nil), "tipb.FieldType") proto.RegisterType((*Expr)(nil), "tipb.Expr") proto.RegisterType((*ByItem)(nil), "tipb.ByItem") proto.RegisterEnum("tipb.ExprType", ExprType_name, ExprType_value) proto.RegisterEnum("tipb.ScalarFuncSig", ScalarFuncSig_name, ScalarFuncSig_value) } func (m *FieldType) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *FieldType) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l dAtA[i] = 0x8 i++ i = encodeVarintExpression(dAtA, i, uint64(m.Tp)) dAtA[i] = 0x10 i++ i = encodeVarintExpression(dAtA, i, uint64(m.Flag)) dAtA[i] = 0x18 i++ i = encodeVarintExpression(dAtA, i, uint64(m.Flen)) dAtA[i] = 0x20 i++ i = encodeVarintExpression(dAtA, i, uint64(m.Decimal)) dAtA[i] = 0x28 i++ i = encodeVarintExpression(dAtA, i, uint64(m.Collate)) dAtA[i] = 0x32 i++ i = encodeVarintExpression(dAtA, i, uint64(len(m.Charset))) i += copy(dAtA[i:], m.Charset) if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *Expr) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *Expr) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l dAtA[i] = 0x8 i++ i = encodeVarintExpression(dAtA, i, uint64(m.Tp)) if m.Val != nil { dAtA[i] = 0x12 i++ i = encodeVarintExpression(dAtA, i, uint64(len(m.Val))) i += copy(dAtA[i:], m.Val) } if len(m.Children) > 0 { for _, msg := range m.Children { dAtA[i] = 0x1a i++ i = encodeVarintExpression(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 
0, err } i += n } } dAtA[i] = 0x20 i++ i = encodeVarintExpression(dAtA, i, uint64(m.Sig)) if m.FieldType != nil { dAtA[i] = 0x2a i++ i = encodeVarintExpression(dAtA, i, uint64(m.FieldType.Size())) n1, err := m.FieldType.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n1 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *ByItem) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ByItem) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Expr != nil { dAtA[i] = 0xa i++ i = encodeVarintExpression(dAtA, i, uint64(m.Expr.Size())) n2, err := m.Expr.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n2 } dAtA[i] = 0x10 i++ if m.Desc { dAtA[i] = 1 } else { dAtA[i] = 0 } i++ if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func encodeFixed64Expression(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) dAtA[offset+1] = uint8(v >> 8) dAtA[offset+2] = uint8(v >> 16) dAtA[offset+3] = uint8(v >> 24) dAtA[offset+4] = uint8(v >> 32) dAtA[offset+5] = uint8(v >> 40) dAtA[offset+6] = uint8(v >> 48) dAtA[offset+7] = uint8(v >> 56) return offset + 8 } func encodeFixed32Expression(dAtA []byte, offset int, v uint32) int { dAtA[offset] = uint8(v) dAtA[offset+1] = uint8(v >> 8) dAtA[offset+2] = uint8(v >> 16) dAtA[offset+3] = uint8(v >> 24) return offset + 4 } func encodeVarintExpression(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) return offset + 1 } func (m *FieldType) Size() (n int) { var l int _ = l n += 1 + sovExpression(uint64(m.Tp)) n += 1 + sovExpression(uint64(m.Flag)) n += 1 + sovExpression(uint64(m.Flen)) n += 1 + sovExpression(uint64(m.Decimal)) n += 1 + sovExpression(uint64(m.Collate)) l = len(m.Charset) n += 1 + l + sovExpression(uint64(l)) if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *Expr) Size() (n int) { var l int _ = l n += 1 + sovExpression(uint64(m.Tp)) if m.Val != nil { l = len(m.Val) n += 1 + l + sovExpression(uint64(l)) } if len(m.Children) > 0 { for _, e := range m.Children { l = e.Size() n += 1 + l + sovExpression(uint64(l)) } } n += 1 + sovExpression(uint64(m.Sig)) if m.FieldType != nil { l = m.FieldType.Size() n += 1 + l + sovExpression(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *ByItem) Size() (n int) { var l int _ = l if m.Expr != nil { l = m.Expr.Size() n += 1 + l + sovExpression(uint64(l)) } n += 2 if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func sovExpression(x uint64) (n int) { for { n++ x >>= 7 if x == 0 { break } } return n } func sozExpression(x uint64) (n int) { return sovExpression(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } func (m *FieldType) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowExpression } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: FieldType: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: FieldType: illegal tag %d (wire type %d)", fieldNum, 
wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Tp", wireType) } m.Tp = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowExpression } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Tp |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Flag", wireType) } m.Flag = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowExpression } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Flag |= (uint32(b) & 0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Flen", wireType) } m.Flen = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowExpression } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Flen |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Decimal", wireType) } m.Decimal = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowExpression } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Decimal |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } case 5: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Collate", wireType) } m.Collate = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowExpression } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Collate |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Charset", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowExpression } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthExpression } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Charset = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipExpression(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthExpression } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *Expr) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowExpression } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: Expr: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: Expr: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Tp", wireType) } m.Tp = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowExpression } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Tp |= (ExprType(b) & 0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Val", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowExpression } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthExpression } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.Val = append(m.Val[:0], dAtA[iNdEx:postIndex]...) if m.Val == nil { m.Val = []byte{} } iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Children", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowExpression } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthExpression } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } m.Children = append(m.Children, &Expr{}) if err := m.Children[len(m.Children)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Sig", wireType) } m.Sig = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowExpression } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Sig |= (ScalarFuncSig(b) & 0x7F) << shift if b < 0x80 { break } } case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field FieldType", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowExpression } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthExpression } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.FieldType == nil { m.FieldType = &FieldType{} } if err := m.FieldType.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipExpression(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthExpression } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *ByItem) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowExpression } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ByItem: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ByItem: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Expr", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowExpression } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthExpression } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Expr == nil { m.Expr = &Expr{} } if err := m.Expr.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Desc", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowExpression } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } m.Desc = bool(v != 0) default: iNdEx = preIndex skippy, err := skipExpression(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthExpression } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func skipExpression(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowExpression } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } wireType := int(wire & 0x7) switch wireType { case 0: for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowExpression } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } iNdEx++ if dAtA[iNdEx-1] < 0x80 { break } } return iNdEx, nil case 1: iNdEx += 8 return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowExpression } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { break } } iNdEx += length if length < 0 { return 0, ErrInvalidLengthExpression } return iNdEx, nil case 3: for { var innerWire uint64 var start int = iNdEx for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowExpression } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ innerWire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } innerWireType := int(innerWire & 0x7) if innerWireType == 4 { break } next, err := skipExpression(dAtA[start:]) if err != nil { return 0, err } iNdEx = start + next } return iNdEx, nil case 4: return iNdEx, nil case 5: iNdEx += 4 return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } } panic("unreachable") } var ( ErrInvalidLengthExpression = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowExpression = fmt.Errorf("proto: integer overflow") ) func init() { proto.RegisterFile("expression.proto", fileDescriptorExpression) } var fileDescriptorExpression = []byte{ // 2532 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x58, 0x59, 0x70, 0x1b, 0xc7, 0xd1, 0x16, 0x78, 0x73, 0x78, 0x35, 0x57, 0x94, 0x04, 0x42, 0x2c, 0x02, 0xd6, 0xef, 0xfa, 0x29, 0xdb, 0x09, 0x53, 0x45, 0x3b, 0x4a, 0x1e, 0x72, 0xf1, 0x00, 0x61, 0xa4, 0x48, 0x4a, 0x04, 0x20, 0xcb, 0x79, 0x4a, 0x2d, 0x81, 0x01, 0xb8, 0xd2, 0x62, 0x17, 0xda, 0x1d, 0x50, 0x44, 0xae, 0xaa, 0xd8, 0xd6, 0x7d, 0x2b, 0x89, 0xad, 0xd3, 0xb6, 0xee, 0x5c, 0xb6, 0x73, 0x49, 0xf2, 0xed, 0x1c, 0x2f, 0x52, 0x9c, 0x54, 0x6c, 0x4b, 0x71, 0x9e, 0x92, 0x4a, 0x29, 0x95, 0x38, 0xb2, 0x73, 0xf9, 0xca, 0x4b, 0x12, 0x27, 0xd5, 0x3d, 0x33, 0x0b, 0x40, 0x7a, 0x9b, 0xfe, 0xe6, 0xeb, 0xfe, 0xbe, 0x9e, 0x9d, 0x1d, 0xec, 0x80, 0x01, 0x5f, 0x2c, 0x79, 0xdc, 0xf7, 0x2d, 0xd7, 0x19, 0x2e, 0x79, 0xae, 0x70, 0x8d, 0x26, 0x61, 0x95, 0xe6, 0x22, 0x7d, 0x05, 0xb7, 0xe0, 0x12, 0xf0, 0x11, 0x1c, 0xc9, 0xb9, 0x55, 0x17, 0x42, 0xac, 0x7d, 0xd2, 0xe2, 0x76, 0x2e, 0x53, 0x29, 0x71, 0xa3, 0x8f, 0x35, 0x88, 0x52, 0x38, 0x14, 0x0b, 0xad, 0x6e, 0x1e, 0x6b, 0xba, 0xfc, 0xbb, 0xe8, 0x92, 0x54, 0x83, 0x28, 0x19, 0x61, 0xd6, 0x94, 0xb7, 0xcd, 0x42, 0xb8, 0x21, 0x16, 0x5a, 0xdd, 0xa5, 0x70, 0x42, 0xe4, 0x0c, 0x77, 0xc2, 0x8d, 0x35, 0x19, 0x84, 0x18, 0x83, 0xac, 0x35, 0xc7, 0xb3, 0x56, 0xd1, 0xb4, 0xc3, 0x4d, 0x35, 0x93, 0x1a, 0xc4, 0xf9, 0xac, 0x6b, 0xdb, 0xa6, 0xe0, 0xe1, 0xe6, 0xda, 0x79, 0x05, 0xd2, 0xfc, 0xbc, 0xe9, 0xf9, 0x5c, 0x84, 0x5b, 0x62, 0xa1, 0xd5, 0xed, 0xc1, 0xbc, 0x04, 0x57, 0xfd, 0x2c, 0xc4, 0x9a, 0xe2, 0x8b, 0x25, 0xcf, 0xb8, 0x3d, 0xb0, 0xdc, 0x3d, 0xd2, 0x3d, 
0x8c, 0x9d, 0x0e, 0x23, 0x8e, 0xed, 0xd4, 0xb4, 0x00, 0xac, 0x71, 0xc1, 0xb4, 0xa9, 0x83, 0xce, 0x14, 0x0e, 0x8d, 0xff, 0x67, 0x6d, 0xd9, 0x79, 0xcb, 0xce, 0x79, 0x64, 0xbf, 0x71, 0x75, 0xc7, 0x08, 0xab, 0x66, 0xa7, 0x82, 0x39, 0xe3, 0x2e, 0xd6, 0xe8, 0x5b, 0x05, 0x6a, 0xa2, 0x7b, 0x64, 0xa9, 0xa4, 0xa4, 0xb3, 0xa6, 0x6d, 0x7a, 0x93, 0x65, 0x27, 0x9b, 0xb6, 0x0a, 0x4a, 0x05, 0x59, 0xc6, 0x30, 0x63, 0x79, 0x5c, 0xcc, 0xcf, 0x8b, 0x4a, 0x49, 0x36, 0xd6, 0x31, 0xd2, 0x23, 0x73, 0x82, 0x45, 0x4e, 0xb5, 0xe7, 0xf5, 0x70, 0xd5, 0x18, 0x6b, 0x19, 0xab, 0x24, 0x05, 0x2f, 0x1a, 0x83, 0xac, 0x09, 0x9f, 0x1b, 0x35, 0x52, 0x6f, 0x85, 0x70, 0x5c, 0xe9, 0x1c, 0xf7, 0xb3, 0xd4, 0x41, 0x9b, 0x5e, 0x69, 0x44, 0xee, 0xbc, 0xd6, 0xc1, 0xda, 0x74, 0xc7, 0x46, 0x1b, 0x6b, 0x9a, 0x29, 0xdb, 0x36, 0x2c, 0x31, 0xda, 0x59, 0x73, 0xd2, 0x11, 0x6b, 0xee, 0x81, 0x90, 0xc1, 0x58, 0xcb, 0x7a, 0x8b, 0xc6, 0x0d, 0x46, 0x07, 0x6b, 0x9d, 0xb4, 0x5d, 0x53, 0xdc, 0x3d, 0x02, 0x8d, 0x41, 0xb0, 0xe6, 0x1e, 0x68, 0x42, 0x56, 0x5a, 0x78, 0x96, 0x53, 0x80, 0x66, 0x4c, 0x1e, 0xab, 0x08, 0xee, 0x43, 0x8b, 0xd1, 0xc9, 0xda, 0xa6, 0x2b, 0xfe, 0x66, 0x7b, 0xcc, 0x12, 0xc0, 0x0d, 0x60, 0x9d, 0x14, 0x4d, 0xc8, 0xc7, 0x08, 0x79, 0xa3, 0x97, 0x75, 0x49, 0xa4, 0xec, 0x99, 0xc2, 0x72, 0x1d, 0x28, 0x18, 0x5d, 0xac, 0x9d, 0xa0, 0xb8, 0x53, 0x2e, 0xc2, 0x7c, 0x50, 0xe1, 0x5e, 0xbe, 0x08, 0x56, 0x10, 0xa5, 0xb9, 0x80, 0x8d, 0x01, 0x35, 0x63, 0x15, 0x39, 0x6c, 0x0a, 0xc2, 0xcf, 0xfa, 0xae, 0x03, 0xb6, 0xd1, 0xcd, 0xda, 0xef, 0x33, 0xed, 0x32, 0x9f, 0xb2, 0x7c, 0x01, 0x8f, 0x84, 0x30, 0x1e, 0x77, 0xed, 0x72, 0xd1, 0x49, 0xf1, 0x3c, 0x5c, 0x09, 0x19, 0x6d, 0xac, 0x71, 0xc6, 0x15, 0x70, 0xa3, 0x95, 0x46, 0xbc, 0x00, 0x6f, 0xb6, 0x1a, 0x1d, 0xac, 0x65, 0xcc, 0x12, 0x18, 0xbc, 0xd5, 0x6a, 0xb4, 0xb2, 0x86, 0xa9, 0x0c, 0xbc, 0xda, 0x43, 0x83, 0x38, 0xbc, 0x46, 0x83, 0xf8, 0x2c, 0x5c, 0xa5, 0xc1, 0x4c, 0x1c, 0xae, 0xd1, 0x20, 0x11, 0x87, 0x5f, 0xcb, 0x41, 0x06, 0x5e, 0xef, 0xc1, 0x12, 0xb8, 0x88, 0xf1, 0x59, 0xf8, 0x4d, 0x8f, 0xaa, 0x37, 0xea, 0xe4, 0xe0, 0x02, 0x18, 0x8c, 0x35, 0x8f, 0x59, 0x62, 0xad, 0x07, 0x17, 0x41, 0x4d, 0xdc, 0xef, 0x7a, 0x70, 0x09, 0xd0, 0xd9, 0x14, 0xcf, 0x8b, 0xf4, 0xbc, 0x95, 0x17, 0xf0, 0x34, 0xc5, 0x29, 0xab, 0x30, 0x2f, 0xe3, 0x67, 0xc0, 0x68, 0x67, 0x4d, 0xeb, 0xec, 0xb2, 0x0f, 0x47, 0x7a, 0xb1, 0xc6, 0xb4, 0xe5, 0x94, 0x7d, 0x38, 0xda, 0x8b, 0xb6, 0xa7, 0xcb, 0x36, 0x1c, 0xa3, 0xd1, 0x84, 0xb5, 0x00, 0xc7, 0x7b, 0xb1, 0x6e, 0xd2, 0x11, 0x18, 0x9c, 0x90, 0x04, 0x37, 0x07, 0x8f, 0xd2, 0x08, 0x4d, 0xfc, 0xa7, 0x17, 0x7d, 0xae, 0xf5, 0xe0, 0x03, 0x82, 0x50, 0xfe, 0xbf, 0x54, 0x73, 0xdc, 0x2d, 0x3b, 0x02, 0x9e, 0x59, 0x81, 0x68, 0xba, 0x5c, 0x84, 0x67, 0x69, 0x34, 0xba, 0x50, 0x80, 0xe7, 0x68, 0x34, 0x6d, 0x39, 0xf0, 0xbc, 0x1c, 0x99, 0x8b, 0xf0, 0xc2, 0x0a, 0xcc, 0x99, 0xb4, 0x3c, 0x5f, 0xc0, 0x8b, 0x2b, 0x0c, 0x60, 0x1d, 0x09, 0xcf, 0x2d, 0x97, 0xc6, 0x5d, 0x27, 0x6b, 0x0a, 0x78, 0x49, 0xe6, 0xce, 0xf9, 0x70, 0x22, 0x8c, 0xa3, 0x75, 0xee, 0x16, 0x78, 0x34, 0x8c, 0x19, 0x29, 0xb7, 0xec, 0xe4, 0xe0, 0xb1, 0x30, 0xba, 0x54, 0xe4, 0xaf, 0xf6, 0x1b, 0x5d, 0xac, 0x4d, 0x06, 0x1b, 0xd2, 0xf0, 0x40, 0x3f, 0x36, 0x8b, 0x8b, 0x01, 0x0f, 0xf6, 0x23, 0x6d, 0x8a, 0x3b, 0x05, 0x31, 0x0f, 0x0f, 0xf5, 0x63, 0xfe, 0x94, 0xbb, 0x85, 0x7b, 0xb0, 0x95, 0x26, 0x52, 0xbc, 0xc4, 0x4d, 0x01, 0xdb, 0xfa, 0x8d, 0x4e, 0xd6, 0x9a, 0xe2, 0x25, 0xdb, 0xcc, 0x72, 0xd8, 0x4e, 0xb4, 0xf5, 0xa5, 0x12, 0xf7, 0x60, 0x07, 0xd1, 0xd2, 0xc2, 0xcb, 0x16, 0x4b, 0xb0, 0x93, 0x68, 0xe3, 0xae, 0xb3, 0xc0, 0x3d, 0x01, 0xbb, 0x48, 0x65, 0xdc, 0xf4, 0x05, 0xec, 0xee, 
0xc7, 0xd5, 0x4e, 0x97, 0xe7, 0x7c, 0xb9, 0x7b, 0xf7, 0xf4, 0x1b, 0x4b, 0x59, 0x77, 0x10, 0x27, 0x9d, 0x1c, 0x5f, 0x84, 0xbd, 0xd2, 0x8a, 0x9b, 0x35, 0x05, 0x87, 0x7d, 0x94, 0x9c, 0xf1, 0xac, 0x22, 0xec, 0xef, 0xc7, 0xe5, 0x4c, 0xe6, 0xe1, 0x8f, 0xfd, 0xfa, 0xb1, 0x27, 0xf3, 0xf0, 0x27, 0x0a, 0x92, 0x79, 0x7a, 0x95, 0xde, 0x20, 0xf6, 0x04, 0x26, 0x5e, 0x89, 0xa0, 0x07, 0x1c, 0x8e, 0xe6, 0x72, 0xf0, 0xf3, 0x20, 0x4a, 0x97, 0xe7, 0xe0, 0xe5, 0x08, 0xd2, 0x3e, 0xc7, 0x4d, 0x0f, 0xae, 0x46, 0x70, 0x45, 0x70, 0xb8, 0x81, 0xf3, 0x4d, 0x70, 0x2d, 0x42, 0xcf, 0xdc, 0x75, 0xc4, 0x3c, 0xfc, 0x96, 0x58, 0x04, 0xbf, 0x41, 0xe9, 0x38, 0xcc, 0x99, 0x15, 0xf8, 0x73, 0xc4, 0xe8, 0x61, 0x0c, 0xa3, 0xb5, 0x79, 0x2a, 0x72, 0x23, 0x42, 0x7b, 0xc2, 0xac, 0xc0, 0xdb, 0x4a, 0xa7, 0x32, 0x63, 0x16, 0x39, 0xbc, 0x13, 0xc1, 0x76, 0x27, 0xcc, 0x8a, 0xe2, 0xbd, 0x4b, 0x89, 0x14, 0x4b, 0x89, 0xf7, 0xaa, 0x04, 0xd2, 0x79, 0x9f, 0x24, 0xef, 0x75, 0xcb, 0x1e, 0xfc, 0x2b, 0x82, 0x7d, 0xe1, 0xee, 0x13, 0x1c, 0xfe, 0x4d, 0x41, 0x9a, 0x67, 0x5d, 0xdc, 0x56, 0x11, 0xdc, 0x03, 0xd3, 0x56, 0xd6, 0x73, 0x7d, 0x89, 0x7c, 0x40, 0xaa, 0xf1, 0x45, 0xe1, 0x99, 0x59, 0x01, 0x5b, 0x57, 0xca, 0x87, 0x6c, 0xda, 0xdc, 0xcf, 0x72, 0x78, 0x82, 0xc2, 0x84, 0xc7, 0x4d, 0xc1, 0x7d, 0x01, 0x4f, 0xae, 0xa4, 0x67, 0xcb, 0xf1, 0x71, 0x3c, 0xb5, 0x12, 0x2b, 0xe1, 0x0b, 0xac, 0x73, 0x0f, 0x0e, 0x20, 0x19, 0x11, 0x3c, 0xa2, 0xe0, 0xd0, 0x00, 0xfa, 0xc3, 0x70, 0xd4, 0xf3, 0xcc, 0x0a, 0x7c, 0x6d, 0x00, 0x1b, 0xc0, 0x78, 0xed, 0xdc, 0x46, 0x9e, 0x15, 0xf0, 0xf5, 0x80, 0x30, 0xcd, 0xbd, 0x02, 0x87, 0x6f, 0x04, 0xf1, 0x7d, 0xa6, 0x6d, 0xe5, 0xe0, 0xe1, 0x01, 0x74, 0x86, 0x31, 0x9e, 0x21, 0x8f, 0x04, 0xe9, 0x49, 0xc7, 0xc7, 0xad, 0x71, 0x78, 0x40, 0x1b, 0xd0, 0x7b, 0xea, 0x48, 0x40, 0x49, 0xf1, 0xa2, 0xbb, 0xc0, 0xe1, 0xe8, 0x80, 0xd1, 0xcb, 0x3a, 0x11, 0x18, 0x77, 0x1d, 0x61, 0x5a, 0x8e, 0x0f, 0xc7, 0x82, 0xac, 0xf5, 0xce, 0xe6, 0xb2, 0x2b, 0x38, 0x1c, 0x1f, 0x30, 0x96, 0x31, 0xa8, 0x25, 0xad, 0x33, 0xc5, 0x3c, 0x9c, 0x18, 0xa0, 0x1d, 0xe3, 0xc0, 0xc9, 0x28, 0xda, 0x48, 0xfa, 0x19, 0xaf, 0x2c, 0xe6, 0xe1, 0x54, 0x94, 0xb6, 0x8c, 0x4f, 0x5b, 0xe6, 0x74, 0x54, 0xae, 0x5d, 0xc9, 0x4b, 0xb9, 0x5b, 0xe0, 0x4c, 0x94, 0xde, 0x08, 0x6b, 0x13, 0x87, 0xb3, 0x51, 0x7a, 0x89, 0x68, 0x7c, 0x2e, 0xaa, 0xb6, 0x30, 0x87, 0xf3, 0x51, 0x34, 0x58, 0xfd, 0x55, 0x81, 0x03, 0x33, 0x77, 0x9e, 0x5e, 0xc9, 0xba, 0xea, 0x7e, 0x67, 0xf0, 0xec, 0xc5, 0x0d, 0x9f, 0x74, 0xc4, 0xa8, 0x9f, 0x74, 0x04, 0x2c, 0xc1, 0xb3, 0x37, 0x40, 0x52, 0xdc, 0xb4, 0x21, 0x64, 0x2c, 0x65, 0x3d, 0x01, 0xa4, 0x8e, 0xf3, 0x06, 0xa3, 0x8f, 0x41, 0x00, 0xea, 0x93, 0xbb, 0xb1, 0x2e, 0x9b, 0xce, 0xdf, 0x26, 0x63, 0x19, 0xeb, 0xad, 0x12, 0xf5, 0x81, 0xde, 0x5c, 0xc7, 0xa4, 0xa3, 0xb9, 0x45, 0x43, 0xa8, 0x2a, 0xdd, 0x30, 0xc3, 0x60, 0xdd, 0x55, 0x88, 0xec, 0x74, 0x68, 0x65, 0x89, 0x29, 0x3f, 0x9d, 0x5a, 0x46, 0xa2, 0xda, 0x50, 0x57, 0x7d, 0x01, 0x72, 0xd4, 0x6d, 0x2c, 0x67, 0x46, 0x0d, 0x55, 0x5b, 0xea, 0xa9, 0xe7, 0x92, 0x27, 0xd0, 0x62, 0xaa, 0xa0, 0xb4, 0xd5, 0xa7, 0xc5, 0x02, 0x94, 0x9c, 0x2d, 0x33, 0x56, 0xb0, 0xa5, 0x75, 0xb0, 0x32, 0xb7, 0xdc, 0x08, 0xb3, 0xbe, 0xba, 0x09, 0xed, 0x6f, 0xc5, 0x2d, 0x95, 0xc8, 0x62, 0xd8, 0xe8, 0x67, 0xcb, 0xea, 0x13, 0xb4, 0xcb, 0xfe, 0x5b, 0x32, 0xc8, 0x68, 0x44, 0x3f, 0x24, 0x29, 0x29, 0x7d, 0x0e, 0x6a, 0xf7, 0x1a, 0x24, 0x9b, 0x51, 0xdd, 0xbf, 0x46, 0x95, 0xcb, 0x98, 0xb6, 0xaf, 0x71, 0x6d, 0xf2, 0xb6, 0x9b, 0xcb, 0x90, 0xc7, 0x55, 0xba, 0xa9, 0x80, 0xae, 0x2d, 0xfe, 0xdf, 0xcd, 0x7c, 0x72, 0x78, 0xbb, 0x7e, 0xbc, 0x98, 0x2d, 0xfd, 0xad, 0xd6, 0x2b, 0x2e, 0x21, 0x72, 
0x77, 0x87, 0x4e, 0x96, 0x98, 0xf2, 0x76, 0xa7, 0xee, 0x5a, 0xa2, 0xda, 0xd9, 0x5d, 0xf5, 0x05, 0xc8, 0xd7, 0x87, 0x74, 0x7b, 0x8a, 0xaa, 0x5d, 0x7d, 0xb8, 0x9e, 0x4b, 0x9e, 0x86, 0x83, 0xc5, 0x54, 0x2c, 0xe9, 0x6b, 0x44, 0x97, 0xa8, 0xc2, 0xe4, 0xed, 0xee, 0xe0, 0x39, 0x06, 0xb8, 0xf2, 0x77, 0x4f, 0xf0, 0xc0, 0x82, 0x19, 0xed, 0xf1, 0xa3, 0xb7, 0x16, 0x23, 0x9f, 0x6b, 0x8c, 0x08, 0x5b, 0x7e, 0x53, 0x8a, 0xf6, 0xfa, 0xb1, 0x5b, 0x73, 0xc8, 0xef, 0xc7, 0xf5, 0x1a, 0xd2, 0x49, 0x47, 0x5e, 0x3f, 0xa1, 0xdb, 0x92, 0x10, 0xf9, 0xfc, 0xa4, 0x5e, 0x43, 0x89, 0x29, 0x8f, 0x9f, 0xd2, 0xcd, 0x4a, 0x54, 0xfb, 0xfb, 0x74, 0x7d, 0x01, 0xf2, 0xf6, 0x19, 0xad, 0xaf, 0xa8, 0xda, 0xd7, 0x68, 0x3d, 0x97, 0x3c, 0x8d, 0xe1, 0x87, 0xdd, 0x54, 0x06, 0xbd, 0xe4, 0xf0, 0x7b, 0x6f, 0x2a, 0x43, 0x1e, 0x38, 0x7e, 0x77, 0x4d, 0x65, 0xaa, 0xdf, 0x74, 0x9d, 0xac, 0x6d, 0x2a, 0xa3, 0xac, 0x14, 0x24, 0x91, 0xb4, 0xe6, 0x8d, 0x6e, 0xc6, 0xa6, 0x32, 0x81, 0x86, 0x25, 0xe7, 0xa8, 0xf6, 0x46, 0xaa, 0x1d, 0xc7, 0xda, 0x0e, 0xc1, 0x71, 0xaa, 0xed, 0x52, 0xed, 0xb8, 0xae, 0x5d, 0xa2, 0xda, 0x71, 0x55, 0x7b, 0xb3, 0x24, 0x52, 0x6d, 0x8f, 0x6a, 0xc7, 0x83, 0xda, 0xbe, 0x9c, 0xa3, 0xda, 0x02, 0x6b, 0x27, 0xc8, 0xf7, 0x22, 0xc2, 0x09, 0xe9, 0xbb, 0x82, 0xb5,
0xb6, 0xbe, 0x55, 0x71, 0xa9, 0xfe, 0xb6, 0x10, 0x9e, 0xdf, 0x89, 0xaa, 0xf9, 0xed, 0x6a, 0x96, 0x14, 0x76, 0xe0, 0x17, 0x78, 0x73, 0x7c, 0x16, 0x25, 0xf6, 0xd0, 0x44, 0x7c, 0x96, 0x24, 0xf6, 0x92, 0x44, 0x7c, 0x56, 0x4b, 0xec, 0x23, 0x89, 0xf8, 0xac, 0x92, 0xd8, 0xaf, 0xb8, 0x24, 0x71, 0x80, 0x24, 0xe2, 0xb3, 0x81, 0xc4, 0x41, 0x35, 0x4b, 0x12, 0x87, 0x48, 0x62, 0x86, 0xba, 0x78, 0x98, 0x26, 0x66, 0x64, 0x17, 0xf2, 0xa3, 0x79, 0x26, 0xe8, 0xe2, 0x30, 0x49, 0xcc, 0xe8, 0x2e, 0x8e, 0x28, 0x2e, 0x49, 0x1c, 0x25, 0x89, 0x99, 0x6a, 0x17, 0xc7, 0xd4, 0x2c, 0x49, 0x1c, 0x97, 0x95, 0xe8, 0xbb, 0x18, 0x65, 0x1e, 0x97, 0x6c, 0x8a, 0x49, 0xea, 0x64, 0xc8, 0x30, 0x58, 0x97, 0x04, 0xb4, 0xdc, 0xa9, 0x10, 0xfe, 0xd0, 0x4a, 0x4c, 0x49, 0x9e, 0xae, 0xc9, 0x23, 0xd9, 0x33, 0xf8, 0xa3, 0xd5, 0xad, 0xf2, 0xb4, 0xf4, 0xd9, 0x1a, 0x16, 0xc9, 0x9f, 0x23, 0xe3, 0xf8, 0x0d, 0x4d, 0x62, 0x97, 0x43, 0xf8, 0x73, 0x8d, 0xa1, 0x96, 0xba, 0x12, 0xc2, 0xdf, 0x5c, 0x44, 0xd0, 0xdd, 0xcb, 0xe4, 0x96, 0xbe, 0xb3, 0x89, 0xff, 0x0b, 0x32, 0x42, 0xb1, 0x4e, 0xf8, 0x25, 0x55, 0x24, 0x08, 0x33, 0x7e, 0x25, 0x19, 0x65, 0x5b, 0x58, 0x25, 0xbb, 0x42, 0x49, 0xaf, 0x84, 0x8c, 0x3e, 0xd6, 0xa3, 0x21, 0x9d, 0xf7, 0x2a, 0x49, 0x6b, 0x14, 0x53, 0x5f, 0x23, 0xb3, 0x13, 0xd6, 0x82, 0x95, 0xe3, 0x94, 0x78, 0x95, 0x96, 0x42, 0x02, 0x3a, 0xed, 0x1a, 0x2d, 0xe6, 0xe8, 0x1c, 0x89, 0x5d, 0x00, 0x34, 0x3b, 0x3a, 0xe7, 0xaf, 0xc7, 0xe8, 0xa2, 0x8e, 0x28, 0xf9, 0x12, 0x60, 0xb5, 0xd1, 0xb9, 0xc0, 0xe8, 0xd3, 0x80, 0xce, 0xc6, 0xb9, 0x65, 0x27, 0x1d, 0x91, 0x71, 0x27, 0x78, 0x16, 0x6f, 0x14, 0xb5, 0x10, 0x16, 0x79, 0x36, 0x80, 0x26, 0x78, 0x56, 0x42, 0xcf, 0xd5, 0x43, 0x98, 0xf8, 0x3c, 0xd0, 0x77, 0x1c, 0xb7, 0x6c, 0xd2, 0x7a, 0x01, 0xd0, 0xe8, 0xa4, 0xed, 0xba, 0x5e, 0x50, 0xfb, 0xc5, 0x9b, 0x30, 0xac, 0xf4, 0x52, 0x15, 0x0b, 0xaa, 0xff, 0xf8, 0x26, 0x0c, 0x73, 0x7f, 0x42, 0x37, 0x1f, 0xc2, 0xa8, 0xfe, 0x4f, 0xa9, 0x97, 0x29, 0xb7, 0x60, 0x65, 0x4d, 0x1b, 0xaf, 0x2f, 0x27, 0xc2, 0x74, 0x55, 0x92, 0xc0, 0x5a, 0x0f, 0x6f, 0x15, 0x55, 0x02, 0x5e, 0x66, 0x1e, 0x0b, 0xa3, 0xc1, 0xf5, 0x8e, 0xe9, 0x55, 0xf0, 0x6a, 0xf7, 0x78, 0x18, 0x45, 0x28, 0x0c, 0x9e, 0xd4, 0x99, 0x30, 0x6e, 0x98, 0x2a, 0x46, 0x4a, 0x67, 0xc3, 0xc6, 0x72, 0xd6, 0x5b, 0x05, 0xf5, 0xe2, 0x9d, 0xa3, 0x02, 0x2a, 0x52, 0x9f, 0x67, 0xe7, 0xa9, 0x80, 0xde, 0x6b, 0x0a, 0xfc, 0x26, 0x39, 0xc1, 0x5a, 0x0a, 0xf8, 0x56, 0x18, 0x57, 0x4f, 0xee, 0x5a, 0x05, 0x7d, 0x9b, 0x38, 0xb8, 0x6b, 0x15, 0xf0, 0x1d, 0x6a, 0x27, 0xe9, 0x08, 0x15, 0x7f, 0x37, 0x1c, 0x7c, 0x8c, 0x4a, 0xe0, 0x09, 0x22, 0xc8, 0x0b, 0x64, 0xda, 0x2a, 0xc0, 0x93, 0xd4, 0x1e, 0xdd, 0x21, 0x31, 0x7c, 0x4a, 0x4f, 0xdf, 0xef, 0x52, 0xfc, 0x3d, 0x1d, 0xcf, 0xf0, 0x02, 0xc6, 0xdf, 0xaf, 0xd6, 0xcf, 0x78, 0x65, 0x0e, 0x3f, 0xa8, 0x31, 0x49, 0xc0, 0x0f, 0xeb, 0xdb, 0x23, 0xec, 0x47, 0x44, 0xa2, 0xa4, 0x49, 0xd3, 0xf6, 0x39, 0x5c, 0x08, 0xe3, 0x8e, 0x95, 0x59, 0x12, 0xb9, 0x28, 0x57, 0x40, 0xa7, 0x49, 0xf0, 0x12, 0xdd, 0xf0, 0x92, 0x0e, 0xae, 0xf1, 0x49, 0xf9, 0x39, 0xeb, 0xd0, 0xda, 0x9e, 0x8a, 0x4a, 0x17, 0x7a, 0x4d, 0x4f, 0x47, 0xb1, 0x89, 0xa4, 0xa3, 0xde, 0xe8, 0x33, 0x8a, 0x4b, 0x6f, 0xf3, 0xd9, 0xa8, 0x14, 0x0f, 0xde, 0xe4, 0x73, 0x6a, 0x96, 0xde, 0xe2, 0xf3, 0xb2, 0x12, 0x5d, 0xac, 0x50, 0x66, 0x6b, 0x8c, 0xd8, 0x14, 0x93, 0xd4, 0xb6, 0x18, 0xf6, 0x23, 0x01, 0x2d, 0xb7, 0x3d, 0x86, 0x0f, 0x42, 0x62, 0x4a, 0x72, 0x47, 0x4d, 0x1e, 0xc9, 0xee, 0x8c, 0x61, 0x43, 0x2a, 0x4f, 0x4b, 0xef, 0x8a, 0x51, 0x43, 0x79, 0x54, 0xda, 0x1d, 0x93, 0x57, 0x3a, 0x52, 0xd9, 0x13, 0x93, 0x36, 0xb4, 0xc2, 0xde, 0x18, 0x35, 0x94, 0x57, 0xd5, 0xf7, 0x29, 
0x2e, 0x55, 0xde, 0xaf, 0xa4, 0x82, 0xaa, 0x07, 0x6a, 0xb4, 0xa9, 0xa9, 0x83, 0x8a, 0x2e, 0x4f, 0xe2, 0x18, 0xae, 0xb5, 0xbe, 0x28, 0xa1, 0xf2, 0x0d, 0xb2, 0xaf, 0x11, 0xd2, 0x7f, 0x33, 0x86, 0x07, 0x8b, 0x86, 0xb4, 0x8b, 0xb7, 0xa8, 0x07, 0x8d, 0x2a, 0x2f, 0x7f, 0xa9, 0xcb, 0x26, 0x47, 0x7f, 0x8d, 0xe1, 0xc5, 0x24, 0xc8, 0xd6, 0xbe, 0xfe, 0x56, 0xc7, 0x24, 0x33, 0x7f, 0x97, 0x66, 0x4c, 0x9f, 0x6f, 0x98, 0xe7, 0xf4, 0x5c, 0xff, 0x21, 0x49, 0x0a, 0x21, 0x33, 0x6f, 0x4b, 0x33, 0x0a, 0xd2, 0x66, 0xde, 0x91, 0x66, 0x14, 0xaa, 0xcc, 0xbc, 0x5b, 0x97, 0x4d, 0x66, 0xde, 0x93, 0x66, 0x74, 0xb6, 0x36, 0xf3, 0x7e, 0x1d, 0x93, 0xcc, 0xfc, 0x33, 0x86, 0xa7, 0x1c, 0x5e, 0x7d, 0x70, 0x67, 0xbf, 0x7e, 0x1b, 0xd6, 0xaf, 0xb9, 0x26, 0x22, 0xb8, 0x73, 0x48, 0x83, 0xea, 0x12, 0x86, 0xe0, 0xae, 0x21, 0x7d, 0x33, 0xc3, 0xeb, 0x23, 0x22, 0xbb, 0x87, 0xf4, 0x5b, 0x96, 0xe6, 0x94, 0xb7, 0x67, 0x08, 0x77, 0x4d, 0xf5, 0x0e, 0x88, 0xd8, 0xde, 0xa0, 0x96, 0xba, 0x06, 0x22, 0xb8, 0x2f, 0x20, 0xca, 0x9b, 0x20, 0x62, 0xfb, 0x87, 0xf4, 0x65, 0x90, 0xae, 0x9b, 0x08, 0x1d, 0x08, 0x68, 0xf2, 0x4a, 0x8a, 0xd8, 0xc1, 0x80, 0x46, 0xd7, 0x56, 0x84, 0x0e, 0x0d, 0x8d, 0xdd, 0x71, 0xf9, 0xfa, 0x60, 0xe8, 0x95, 0xeb, 0x83, 0xa1, 0xdf, 0x5f, 0x1f, 0x0c, 0x1d, 0xfe, 0xc3, 0xe0, 0x12, 0xb6, 0x2c, 0xeb, 0x16, 0x87, 0x4b, 0x96, 0x53, 0xc8, 0x9a, 0xa5, 0x61, 0x61, 0xe5, 0xe6, 0xe8, 0xaf, 0xbc, 0x75, 0xa1, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x36, 0xdd, 0xcb, 0xf0, 0x98, 0x15, 0x00, 0x00, }
0x13, 0x81, 0xef, 0x2f, 0x60, 0xed, 0x84, 0xf6, 0xfd, 0x45, 0x49, 0xa4, 0xda, 0x5f, 0xc2, 0xda, 0x89, 0xaa, 0xef, 0x2f, 0xcb, 0x39, 0xaa, 0xfd, 0x15, 0x3c, 0x91, 0x13, 0xe4, 0xfb, 0x81, 0x10, 0x9e, 0xe1, 0x09, 0x69, 0xfc, 0x41, 0xfa, 0xdc, 0x4c, 0x04, 0xce, 0x1f, 0x0a, 0xd1, 0x4f, 0x9e,
main_test.go
// Run this test when the application is running!
package main

import (
	"encoding/json"
	"fmt"
	"github.com/go-resty/resty/v2"
	"github.com/rurick/coinswallet/internal/domain/wallet/entity"
	"github.com/stretchr/testify/assert"
	"math/rand"
	"testing"
)

func Test_API(t *testing.T)
{ wallets := []string{ "testwalletsalt1", "testwalletsalt2", "testwalletsalt3", "testwalletsalt4", } client := resty.New() t.Run("create 4 accounts", func(t *testing.T) { for _, n := range wallets { resp, err := client.R(). SetHeader("Content-Type", "application/json"). SetBody([]byte(fmt.Sprintf(`{"name":"%s"}`, n))). Post("http://localhost:8081/account/") if err != nil { t.Fatal(err) } assert.Equal(t, 200, resp.StatusCode()) var r map[string]string if err = json.Unmarshal(resp.Body(), &r); err != nil { t.Fatal(err) } if e, ok := r["error"]; ok { t.Error(e) } } }) t.Run("deposit 4 accounts", func(t *testing.T) { for _, n := range wallets { resp, err := client.R(). SetHeader("Content-Type", "application/json"). SetBody([]byte(fmt.Sprintf(`{"name":"%s","amount":%f}`, n, rand.Float64()*10+10))). Patch("http://localhost:8081/account/deposit/") if err != nil { t.Fatal(err) } assert.Equal(t, 200, resp.StatusCode()) var r map[string]interface{} if err = json.Unmarshal(resp.Body(), &r); err != nil { t.Fatal(err, resp) } if e, ok := r["error"]; ok { t.Error(e) } } }) t.Run("deposit with errors", func(t *testing.T) { w := []struct { n string a float64 code int }{ { n: "wrongAccountName", a: 1, code: 404, }, { n: wallets[0], a: -1, code: 400, }, } for _, n := range w { send := func(name string, amount float64) (*resty.Response, error) { return client.R(). SetHeader("Content-Type", "application/json"). SetBody([]byte(fmt.Sprintf(`{"name":"%s","amount":%f}`, name, amount))). Patch("http://localhost:8081/account/deposit/") } resp, err := send(n.n, n.a) assert.Equal(t, n.code, resp.StatusCode()) if err != nil { t.Fatal(err) } var r map[string]interface{} if err = json.Unmarshal(resp.Body(), &r); err != nil { t.Fatal(err, resp) } if e, ok := r["error"]; !ok { t.Error(e) } } }) t.Run("transfer", func(t *testing.T) { w := []struct { nFrom string nTo string a float64 }{ { nFrom: wallets[0], nTo: wallets[1], a: 2, }, { nFrom: wallets[2], nTo: wallets[1], a: 2, }, { nFrom: wallets[2], nTo: wallets[3], a: 2, }, { nFrom: wallets[3], nTo: wallets[1], a: 2, }, { nFrom: wallets[1], nTo: wallets[2], a: 2, }, } for _, n := range w { send := func(name, nameTo string, amount float64) (*resty.Response, error) { return client.R(). SetHeader("Content-Type", "application/json"). SetBody([]byte(fmt.Sprintf(`{"from":"%s","to":"%s", "amount":%f}`, name, nameTo, amount))). Patch("http://localhost:8081/account/transfer/") } resp, err := send(n.nFrom, n.nTo, n.a) assert.Equal(t, 200, resp.StatusCode()) if err != nil { t.Fatal(err) } var r map[string]interface{} if err = json.Unmarshal(resp.Body(), &r); err != nil { t.Fatal(err, resp) } if e, ok := r["error"]; ok { t.Error(e) } } }) t.Run("transfer with errors", func(t *testing.T) { w := []struct { nFrom string nTo string a float64 code int }{ { nFrom: wallets[0], nTo: wallets[1], a: -1, code: 400, }, { nFrom: wallets[1], nTo: wallets[1], a: 2, code: 400, }, { nFrom: wallets[2], nTo: wallets[3], a: 0, code: 400, }, { nFrom: wallets[3], nTo: wallets[1], a: 2000, code: 400, }, { nFrom: "wrongAccountName", nTo: wallets[2], a: 2, code: 404, }, { nFrom: wallets[2], nTo: "wrongAccountName", a: 2, code: 404, }, } for _, n := range w { send := func(name, nameTo string, amount float64) (*resty.Response, error) { return client.R(). SetHeader("Content-Type", "application/json"). SetBody([]byte(fmt.Sprintf(`{"from":"%s","to":"%s", "amount":%f}`, name, nameTo, amount))). 
Patch("http://localhost:8081/account/transfer/") } resp, err := send(n.nFrom, n.nTo, n.a) assert.Equal(t, n.code, resp.StatusCode()) if err != nil { t.Fatal(err) } var r map[string]interface{} if err = json.Unmarshal(resp.Body(), &r); err != nil { t.Fatal(err, resp) } if e, ok := r["error"]; !ok { t.Error(e) } } }) t.Run("accounts list", func(t *testing.T) { resp, err := client.R().Get("http://localhost:8081/accounts/0/3/") if err != nil { t.Fatal(err) } assert.Equal(t, 200, resp.StatusCode()) var out map[string]interface{} if err = json.Unmarshal(resp.Body(), &out); err != nil { t.Fatal(err) } assert.Equal(t, len(out["list"].([]interface{})), 3) // o,_ := json.MarshalIndent(out,"","\t") // fmt.Println(string(o)) }) t.Run("all payment list", func(t *testing.T) { resp, err := client.R().Get("http://localhost:8081/payments/0/3/") if err != nil { t.Fatal(err) } assert.Equal(t, 200, resp.StatusCode()) var out map[string]interface{} if err = json.Unmarshal(resp.Body(), &out); err != nil { t.Fatal(err) } assert.Equal(t, len(out["list"].([]interface{})), 3) // o,_ := json.MarshalIndent(out,"","\t") // fmt.Println(string(o)) }) t.Run("account payment list", func(t *testing.T) { resp, err := client.R().Get(fmt.Sprintf("http://localhost:8081/payments/%s/0/3/", wallets[1])) if err != nil { t.Fatal(err) } assert.Equal(t, 200, resp.StatusCode()) var out map[string]interface{} if err = json.Unmarshal(resp.Body(), &out); err != nil { t.Fatal(err) } assert.Equal(t, len(out["list"].([]interface{})), 3) // o,_ := json.MarshalIndent(out,"","\t") // fmt.Println(string(o)) }) t.Run("delete 4 accounts", func(t *testing.T) { for _, n := range wallets { a, err := entity.NewAccount() if err != nil { t.Fatal(err) } if err = a.Find(entity.AccountName(n)); err != nil { t.Fatal(err) } if err = a.Delete(); err != nil { t.Error(err) } } }) }
ThermostatSpec.js
'use strict';

describe('Thermostat', function() {
  var thermostat;

  beforeEach(function(){
    thermostat = new Thermostat();
  });

  describe('defaults', function() {
    it('temperature of 20deg', function() {
      expect(thermostat.temperature).toEqual(20);
    });
    it('power saving ON', function() {
      expect(thermostat.powerSaving).toEqual(true);
    });
  });
it('increases temperature', function() { thermostat.up(1); expect(thermostat.temperature).toEqual(21); }); it('decreases temperature', function() { thermostat.down(-1); expect(thermostat.temperature).toEqual(19); }); it('has a minimum temperature of 10deg', function(){ expect(function(){thermostat.down(-11);}).toThrowError("Minimum temperature is 10 degrees!"); }); it('resets temperature', function(){ thermostat.resetTemp(); expect(thermostat.temperature).toEqual(20); }); describe('Power saving on', function(){ it('has a maximum temperature of 25deg', function() { expect(function(){thermostat.up(6);}).toThrowError("Maximum temperature with power saving ON is 25 degrees!"); }); }); describe('Power saving off', function(){ it('has a maximum temperature of 32deg', function() { thermostat.powerSavingOff(); expect(function(){thermostat.up(13);}).toThrowError("Maximum temperature with power saving OFF is 32 degrees!"); }); }); describe('Energy usage', function(){ it('is low when below 18deg', function(){ thermostat.temperature = 17; expect(thermostat.energyUsage()).toEqual("low-usage"); }); it('is medium when between 18 and 24deg', function(){ thermostat.temperature = 20; expect(thermostat.energyUsage()).toEqual("medium-usage"); }); it('is high when 25deg or more', function(){ thermostat.temperature = 26; expect(thermostat.energyUsage()).toEqual("high-usage"); }); }); });
jest.config.js
/* eslint-disable import/no-extraneous-dependencies */ const { pathsToModuleNameMapper } = require('ts-jest/utils'); // Load the config which holds the path aliases. const { compilerOptions } = require('../../tsconfig.json'); module.exports = { ...require('../../jest.config'),
collectCoverageFrom: ['src/**/*.{t,j}s?(x)'], };
cpsr.rs
#[doc = r" Value read from the register"] pub struct R { bits: u32, } #[doc = r" Value to write to the register"] pub struct W { bits: u32, } impl super::CPSR { #[doc = r" Modifies the contents of the register"] #[inline] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W, { let bits = self.register.get(); let r = R { bits: bits }; let mut w = W { bits: bits }; f(&r, &mut w); self.register.set(w.bits); } #[doc = r" Reads the contents of the register"] #[inline] pub fn read(&self) -> R { R { bits: self.register.get(), } } #[doc = r" Writes to the register"] #[inline] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W, { let mut w = W::reset_value(); f(&mut w); self.register.set(w.bits); } #[doc = r" Writes the reset value to the register"] #[inline] pub fn reset(&self) { self.write(|w| w) } } #[doc = r" Value of the field"] pub struct CPSDVSRR { bits: u8, } impl CPSDVSRR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { self.bits } } #[doc = r" Proxy"] pub struct _CPSDVSRW<'a> { w: &'a mut W, } impl<'a> _CPSDVSRW<'a> { #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u8) -> &'a mut W { const MASK: u8 = 0xff; const OFFSET: u8 = 0; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } impl R { #[doc = r" Value of the register as raw bits"] #[inline] pub fn bits(&self) -> u32
#[doc = "Bits 0:7 - This even value between 2 and 254, by which PCLK is divided to yield the prescaler output clock. Bit 0 always reads as 0."] #[inline] pub fn cpsdvsr(&self) -> CPSDVSRR { let bits = { const MASK: u8 = 0xff; const OFFSET: u8 = 0; ((self.bits >> OFFSET) & MASK as u32) as u8 }; CPSDVSRR { bits } } } impl W { #[doc = r" Reset value of the register"] #[inline] pub fn reset_value() -> W { W { bits: 0 } } #[doc = r" Writes raw bits to the register"] #[inline] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } #[doc = "Bits 0:7 - This even value between 2 and 254, by which PCLK is divided to yield the prescaler output clock. Bit 0 always reads as 0."] #[inline] pub fn cpsdvsr(&mut self) -> _CPSDVSRW { _CPSDVSRW { w: self } } }
{ self.bits }
orderer_test.go
/* Copyright IBM Corp. All Rights Reserved. SPDX-License-Identifier: Apache-2.0 */ package capabilities import ( "testing" cb "github.com/hyperledger/fabric/protos/common" "github.com/stretchr/testify/assert" ) func TestOrdererV10(t *testing.T) { op := NewOrdererProvider(map[string]*cb.Capability{}) assert.NoError(t, op.Supported()) assert.Equal(t, ordererTypeName, op.Type()) assert.False(t, op.PredictableChannelTemplate()) assert.False(t, op.Resubmission()) assert.False(t, op.ExpirationCheck()) assert.False(t, op.ConsensusTypeMigration()) assert.False(t, op.UseChannelCreationPolicyAsAdmins()) } func TestOrdererV11(t *testing.T) { op := NewOrdererProvider(map[string]*cb.Capability{ OrdererV1_1: {}, }) assert.NoError(t, op.Supported()) assert.True(t, op.PredictableChannelTemplate()) assert.True(t, op.Resubmission()) assert.True(t, op.ExpirationCheck()) assert.False(t, op.ConsensusTypeMigration()) assert.False(t, op.UseChannelCreationPolicyAsAdmins()) } func TestOrdererV142(t *testing.T) { op := NewOrdererProvider(map[string]*cb.Capability{ OrdererV1_4_2: {}, }) assert.NoError(t, op.Supported()) assert.True(t, op.PredictableChannelTemplate()) assert.True(t, op.Resubmission()) assert.True(t, op.ExpirationCheck()) assert.True(t, op.ConsensusTypeMigration()) assert.False(t, op.UseChannelCreationPolicyAsAdmins()) } func TestOrdererV20(t *testing.T) { op := NewOrdererProvider(map[string]*cb.Capability{ OrdererV1_1: {}, OrdererV2_0: {}, }) assert.NoError(t, op.Supported()) assert.True(t, op.PredictableChannelTemplate()) assert.True(t, op.Resubmission()) assert.True(t, op.ExpirationCheck()) assert.True(t, op.ConsensusTypeMigration()) } func
(t *testing.T) { op := NewOrdererProvider(map[string]*cb.Capability{ OrdererV1_1: {}, OrdererV2_0: {}, "Bogus_Not_suported": {}, }) assert.EqualError(t, op.Supported(), "Orderer capability Bogus_Not_suported is required but not supported") }
TestNotSuported
build.rs
use std::path::PathBuf; fn
() {
    let path = PathBuf::from(std::env::var("CARGO_MANIFEST_DIR").unwrap());
    let opam_prefix = PathBuf::from(std::env::var("OPAM_SWITCH_PREFIX").unwrap());
    let libirmin_prefix = std::env::var("LIBIRMIN_PREFIX");
    let header = if let Ok(libirmin_prefix) = libirmin_prefix {
        let prefix = PathBuf::from(libirmin_prefix);
        println!("cargo:rustc-link-search={}", prefix.join("lib").display());
        println!(
            "cargo:rustc-link-arg=-Wl,-rpath,{}",
            prefix.join("lib").display()
        );
        prefix.join("include").join("irmin.h")
    } else if path.join("../libirmin.opam").exists() {
        // In repo: run make and wait for it to finish before copying,
        // otherwise the cp below races against the build.
        std::process::Command::new("make")
            .arg("-C")
            .arg(path.join("..").as_os_str())
            .status()
            .unwrap();
        let lib = path
            .join("..")
            .join("_build")
            .join("default")
            .join("libirmin.so");
        std::process::Command::new("cp")
            .arg(lib.as_os_str())
            .arg(&path)
            .status()
            .unwrap();
        println!("cargo:rustc-link-search={}", path.display());
        println!("cargo:rustc-link-arg=-Wl,-rpath,{}", path.display());
        path.join("..")
            .join("_build")
            .join("default")
            .join("irmin.h")
    } else if opam_prefix
        .join("lib")
        .join("libirmin")
        .join("lib")
        .join("libirmin.so")
        .exists()
    {
        // Using opam
        println!(
            "cargo:rustc-link-arg=-Wl,-rpath,{}",
            opam_prefix
                .join("lib")
                .join("libirmin")
                .join("lib")
                .display()
        );
        println!(
            "cargo:rustc-link-search={}",
            opam_prefix
                .join("lib")
                .join("libirmin")
                .join("lib")
                .display()
        );
        opam_prefix
            .join("lib")
            .join("libirmin")
            .join("include")
            .join("irmin.h")
    } else if opam_prefix.join("lib").join("libirmin.so").exists() {
        // Using opam prefix
        println!(
            "cargo:rustc-link-arg=-Wl,-rpath,{}",
            opam_prefix.join("lib").display()
        );
        println!(
            "cargo:rustc-link-search={}",
            opam_prefix.join("lib").display()
        );
        opam_prefix.join("include").join("irmin.h")
    } else {
        // Installed in $HOME/.local or /usr/local
        let home = std::env::var("HOME").unwrap_or_default();
        let user = PathBuf::from(home).join(".local");
        if user.join("include").join("irmin.h").exists() {
            println!("cargo:rustc-link-search={}", user.join("lib").display());
            user.join("include").join("irmin.h")
        } else {
            println!("cargo:rustc-link-search=/usr/local/lib");
            PathBuf::from("/usr/local/include/irmin.h")
        }
    };
    println!("cargo:rustc-link-lib=irmin");
    println!("cargo:rerun-if-changed={}", header.display());
    let bindings = bindgen::builder()
        .header(header.to_str().unwrap())
        .allowlist_type("Irmin.*")
        .allowlist_function("irmin.*")
        .allowlist_function("caml.*")
        .generate()
        .unwrap();
    let out_path = std::path::PathBuf::from(std::env::var("OUT_DIR").unwrap());
    bindings.write_to_file(out_path.join("c.rs")).unwrap();
}
main
double.rs
use crate::native::registry::Registry; use crate::runtime::frame::Frame; use crate::utils::numbers::{f64_to_i64, i64_to_f64}; pub fn init() { Registry::register( "java/lang/Double", "doubleToRawLongBits", "(D)J", double_to_raw_long_bits, ); Registry::register( "java/lang/Double", "longBitsToDouble", "(J)D", long_bits_to_double, ); } pub fn double_to_raw_long_bits(frame: &mut Frame) { let value = frame.local_vars().expect("vars is none").get_double(0); frame .operand_stack() .expect("stack is none") .push_long(f64_to_i64(value)); }
frame .operand_stack() .expect("stack is none") .push_double(i64_to_f64(value)); }
pub fn long_bits_to_double(frame: &mut Frame) { let value = frame.local_vars().expect("vars is none").get_long(0);
850. Rectangle Area II.go
package leetcode

import (
	"sort"
)

func rectangleArea(rectangles [][]int) int {
	sat, res := SegmentAreaTree{}, 0
	posXMap, posX, posYMap, posY, lines := discretization850(rectangles)
	tmp := make([]int, len(posYMap))
	for i := 0; i < len(tmp)-1; i++ {
		tmp[i] = posY[i+1] - posY[i]
	}
	sat.Init(tmp, func(i, j int) int { return i + j })
	for i := 0; i < len(posX)-1; i++ {
		for _, v := range lines[posXMap[posX[i]]] {
			sat.Update(posYMap[v.start], posYMap[v.end], v.state)
		}
		res += ((posX[i+1] - posX[i]) * sat.Query(0, len(posY)-1)) % 1000000007
	}
	return res % 1000000007
}

func discretization850(positions [][]int) (map[int]int, []int, map[int]int, []int, map[int][]LineItem) {
	tmpXMap, tmpYMap, posXArray, posXMap, posYArray, posYMap, lines := map[int]int{}, map[int]int{}, []int{}, map[int]int{}, []int{}, map[int]int{}, map[int][]LineItem{}
	for _, pos := range positions {
		tmpXMap[pos[0]]++
		tmpXMap[pos[2]]++
	}
	for k := range tmpXMap {
		posXArray = append(posXArray, k)
	}
	sort.Ints(posXArray)
	for i, pos := range posXArray {
		posXMap[pos] = i
	}
	for _, pos := range positions {
		tmpYMap[pos[1]]++
		tmpYMap[pos[3]]++
		tmp1 := lines[posXMap[pos[0]]]
		tmp1 = append(tmp1, LineItem{start: pos[1], end: pos[3], state: 1})
		lines[posXMap[pos[0]]] = tmp1
		tmp2 := lines[posXMap[pos[2]]]
		tmp2 = append(tmp2, LineItem{start: pos[1], end: pos[3], state: -1})
		lines[posXMap[pos[2]]] = tmp2
	}
	for k := range tmpYMap {
		posYArray = append(posYArray, k)
	}
	sort.Ints(posYArray)
	for i, pos := range posYArray {
		posYMap[pos] = i
	}
	return posXMap, posXArray, posYMap, posYArray, lines
}

// LineItem define
type LineItem struct {
	// A line segment perpendicular to the x axis;
	// state = 1 marks entering a rectangle, -1 marks leaving it.
	start, end, state int
}

// SegmentItem define
type SegmentItem struct {
	count int
	val   int
}

// SegmentAreaTree define
type SegmentAreaTree struct {
	data        []int
	tree        []SegmentItem
	left, right int
	merge       func(i, j int) int
}

// Init define
func (sat *SegmentAreaTree) Init(nums []int, oper func(i, j int) int) {
	sat.merge = oper
	data, tree := make([]int, len(nums)), make([]SegmentItem, 4*len(nums))
	for i := 0; i < len(nums); i++ {
		data[i] = nums[i]
	}
	sat.data, sat.tree = data, tree
	if len(nums) > 0 {
		sat.buildSegmentTree(0, 0, len(nums)-1)
	}
}

// Build, at position treeIndex, the segment tree for the interval [left....right]
func (sat *SegmentAreaTree) buildSegmentTree(treeIndex, left, right int) {
	if left == right-1 {
		sat.tree[treeIndex] = SegmentItem{count: 0, val: sat.data[left]}
		return
	}
	midTreeIndex, leftTreeIndex, rightTreeIndex := left+(right-left)>>1, sat.leftChild(treeIndex), sat.rightChild(treeIndex)
	sat.buildSegmentTree(leftTreeIndex, left, midTreeIndex)
	sat.buildSegmentTree(rightTreeIndex, midTreeIndex, right)
	sat.pushUp(treeIndex, leftTreeIndex, rightTreeIndex)
}

func (sat *SegmentAreaTree) pushUp(treeIndex, leftTreeIndex, rightTreeIndex int) {
	newCount, newValue := sat.merge(sat.tree[leftTreeIndex].count, sat.tree[rightTreeIndex].count), 0
	if sat.tree[leftTreeIndex].count > 0 && sat.tree[rightTreeIndex].count > 0 {
		newValue = sat.merge(sat.tree[leftTreeIndex].val, sat.tree[rightTreeIndex].val)
	} else if sat.tree[leftTreeIndex].count > 0 && sat.tree[rightTreeIndex].count == 0 {
		newValue = sat.tree[leftTreeIndex].val
	} else if sat.tree[leftTreeIndex].count == 0 && sat.tree[rightTreeIndex].count > 0 {
		newValue = sat.tree[rightTreeIndex].val
	}
	sat.tree[treeIndex] = SegmentItem{count: newCount, val: newValue}
}

func (sat *SegmentAreaTree) leftChild(index int) int {
	return 2*index + 1
}

func (sat *SegmentAreaTree) rightChild(index int) int {
	return 2*index
+ 2
}

// Query the value covering the interval [left....right]
// Query define
func (sat *SegmentAreaTree) Query(left, right int) int {
	if len(sat.data) > 0 {
		return sat.queryInTree(0, 0, len(sat.data)-1, left, right)
	}
	return 0
}

func (sat *SegmentAreaTree) queryInTree(treeIndex, left, right, queryLeft, queryRight int) int {
	midTreeIndex, leftTreeIndex, rightTreeIndex := left+(right-left)>>1, sat.leftChild(treeIndex), sat.rightChild(treeIndex)
	if left > queryRight || right < queryLeft { // segment completely outside range
		return 0 // represents a null
inside range
		if sat.tree[treeIndex].count > 0 {
			return sat.tree[treeIndex].val
		}
		return 0
	}
	if queryLeft > midTreeIndex {
		return sat.queryInTree(rightTreeIndex, midTreeIndex, right, queryLeft, queryRight)
	} else if queryRight <= midTreeIndex {
		return sat.queryInTree(leftTreeIndex, left, midTreeIndex, queryLeft, queryRight)
	}
	// merge query results
	return sat.merge(sat.queryInTree(leftTreeIndex, left, midTreeIndex, queryLeft, midTreeIndex),
		sat.queryInTree(rightTreeIndex, midTreeIndex, right, midTreeIndex, queryRight))
}

// Update define
func (sat *SegmentAreaTree) Update(updateLeft, updateRight, val int) {
	if len(sat.data) > 0 {
		sat.updateInTree(0, 0, len(sat.data)-1, updateLeft, updateRight, val)
	}
}

func (sat *SegmentAreaTree) updateInTree(treeIndex, left, right, updateLeft, updateRight, val int) {
	midTreeIndex, leftTreeIndex, rightTreeIndex := left+(right-left)>>1, sat.leftChild(treeIndex), sat.rightChild(treeIndex)
	if left > right || left >= updateRight || right <= updateLeft {
		// Because leaf intervals are no longer left == right, these bounds
		// checks need the equality comparisons as well.
		return // out of range. escape.
	}
	if updateLeft <= left && right <= updateRight { // segment is fully within update range
		if left == right-1 {
			sat.tree[treeIndex].count = sat.merge(sat.tree[treeIndex].count, val)
		}
		if left != right-1 { // update lazy[] for children
			sat.updateInTree(leftTreeIndex, left, midTreeIndex, updateLeft, updateRight, val)
			sat.updateInTree(rightTreeIndex, midTreeIndex, right, updateLeft, updateRight, val)
			sat.pushUp(treeIndex, leftTreeIndex, rightTreeIndex)
		}
		return
	}
	sat.updateInTree(leftTreeIndex, left, midTreeIndex, updateLeft, updateRight, val)
	sat.updateInTree(rightTreeIndex, midTreeIndex, right, updateLeft, updateRight, val)
	// merge updates
	sat.pushUp(treeIndex, leftTreeIndex, rightTreeIndex)
}
node } if queryLeft <= left && queryRight >= right { // segment completely
scr_118.go
//go:build go1.18 // +build go1.18 package scr // Must panics if err is not nil. v is returned if err is nil. // f := scr.Must(os.Create("example")) // defer scr.Close(f) // // write to f func Must[T any](v T, err error) T { if err != nil { panic(err)
return v }
}
source_change.rs
//! This modules defines type to represent changes to the source code, that flow //! from the server to the client. //! //! It can be viewed as a dual for `AnalysisChange`. use std::{ collections::hash_map::Entry, iter::{self, FromIterator}, }; use base_db::{AnchoredPathBuf, FileId}; use rustc_hash::FxHashMap; use stdx::never; use text_edit::TextEdit; #[derive(Default, Debug, Clone)] pub struct SourceChange { pub source_file_edits: FxHashMap<FileId, TextEdit>, pub file_system_edits: Vec<FileSystemEdit>, pub is_snippet: bool, } impl SourceChange { /// Creates a new SourceChange with the given label /// from the edits. pub fn from_edits( source_file_edits: FxHashMap<FileId, TextEdit>, file_system_edits: Vec<FileSystemEdit>, ) -> Self { SourceChange { source_file_edits, file_system_edits, is_snippet: false } } pub fn from_text_edit(file_id: FileId, edit: TextEdit) -> Self
pub fn insert_source_edit(&mut self, file_id: FileId, edit: TextEdit) { match self.source_file_edits.entry(file_id) { Entry::Occupied(mut entry) => { never!(entry.get_mut().union(edit).is_err(), "overlapping edits for same file"); } Entry::Vacant(entry) => { entry.insert(edit); } } } pub fn push_file_system_edit(&mut self, edit: FileSystemEdit) { self.file_system_edits.push(edit); } pub fn get_source_edit(&self, file_id: FileId) -> Option<&TextEdit> { self.source_file_edits.get(&file_id) } } impl Extend<(FileId, TextEdit)> for SourceChange { fn extend<T: IntoIterator<Item = (FileId, TextEdit)>>(&mut self, iter: T) { iter.into_iter().for_each(|(file_id, edit)| self.insert_source_edit(file_id, edit)); } } impl From<FxHashMap<FileId, TextEdit>> for SourceChange { fn from(source_file_edits: FxHashMap<FileId, TextEdit>) -> SourceChange { SourceChange { source_file_edits, file_system_edits: Vec::new(), is_snippet: false } } } #[derive(Debug, Clone)] pub enum FileSystemEdit { CreateFile { dst: AnchoredPathBuf, initial_contents: String }, MoveFile { src: FileId, dst: AnchoredPathBuf }, } impl From<FileSystemEdit> for SourceChange { fn from(edit: FileSystemEdit) -> SourceChange { SourceChange { source_file_edits: Default::default(), file_system_edits: vec![edit], is_snippet: false, } } }
{ SourceChange { source_file_edits: FxHashMap::from_iter(iter::once((file_id, edit))), ..Default::default() } }
media_player.py
"""Support for Epson projector.""" from __future__ import annotations import logging from epson_projector.const import ( BACK, BUSY, CMODE, CMODE_LIST, CMODE_LIST_SET, DEFAULT_SOURCES, EPSON_CODES, FAST, INV_SOURCES, MUTE, PAUSE, PLAY, POWER, SOURCE, SOURCE_LIST, STATE_UNAVAILABLE as EPSON_STATE_UNAVAILABLE, TURN_OFF, TURN_ON, VOL_DOWN, VOL_UP, VOLUME, ) import voluptuous as vol from homeassistant.components.media_player import ( MediaPlayerEntity, MediaPlayerEntityFeature, ) from homeassistant.config_entries import ConfigEntry from homeassistant.const import STATE_OFF, STATE_ON from homeassistant.core import HomeAssistant from homeassistant.helpers import entity_platform import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import DeviceInfo from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.entity_registry import async_get as async_get_entity_registry from .const import ATTR_CMODE, DOMAIN, SERVICE_SELECT_CMODE _LOGGER = logging.getLogger(__name__) async def async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: AddEntitiesCallback, ) -> None: """Set up the Epson projector from a config entry.""" entry_id = config_entry.entry_id unique_id = config_entry.unique_id projector = hass.data[DOMAIN][entry_id] projector_entity = EpsonProjectorMediaPlayer( projector=projector, name=config_entry.title, unique_id=unique_id, entry=config_entry, ) async_add_entities([projector_entity], True) platform = entity_platform.async_get_current_platform() platform.async_register_entity_service( SERVICE_SELECT_CMODE, {vol.Required(ATTR_CMODE): vol.All(cv.string, vol.Any(*CMODE_LIST_SET))}, SERVICE_SELECT_CMODE, ) class EpsonProjectorMediaPlayer(MediaPlayerEntity): """Representation of Epson Projector Device.""" _attr_supported_features = ( MediaPlayerEntityFeature.TURN_ON | MediaPlayerEntityFeature.TURN_OFF | MediaPlayerEntityFeature.SELECT_SOURCE | MediaPlayerEntityFeature.VOLUME_MUTE | MediaPlayerEntityFeature.VOLUME_STEP | MediaPlayerEntityFeature.NEXT_TRACK | MediaPlayerEntityFeature.PREVIOUS_TRACK ) def __init__(self, projector, name, unique_id, entry): """Initialize entity to control Epson projector.""" self._projector = projector self._entry = entry self._name = name self._available = False self._cmode = None self._source_list = list(DEFAULT_SOURCES.values()) self._source = None self._volume = None self._state = None self._unique_id = unique_id async def set_unique_id(self): """Set unique id for projector config entry.""" _LOGGER.debug("Setting unique_id for projector") if self._unique_id: return False if uid := await self._projector.get_serial_number(): self.hass.config_entries.async_update_entry(self._entry, unique_id=uid) registry = async_get_entity_registry(self.hass) old_entity_id = registry.async_get_entity_id( "media_player", DOMAIN, self._entry.entry_id ) if old_entity_id is not None:
self.hass.async_create_task( self.hass.config_entries.async_reload(self._entry.entry_id) ) return True async def async_update(self): """Update state of device.""" power_state = await self._projector.get_power() _LOGGER.debug("Projector status: %s", power_state) if not power_state or power_state == EPSON_STATE_UNAVAILABLE: self._available = False return self._available = True if power_state == EPSON_CODES[POWER]: self._state = STATE_ON if await self.set_unique_id(): return self._source_list = list(DEFAULT_SOURCES.values()) cmode = await self._projector.get_property(CMODE) self._cmode = CMODE_LIST.get(cmode, self._cmode) source = await self._projector.get_property(SOURCE) self._source = SOURCE_LIST.get(source, self._source) volume = await self._projector.get_property(VOLUME) if volume: self._volume = volume elif power_state == BUSY: self._state = STATE_ON else: self._state = STATE_OFF @property def device_info(self) -> DeviceInfo | None: """Get attributes about the device.""" if not self._unique_id: return None return DeviceInfo( identifiers={(DOMAIN, self._unique_id)}, manufacturer="Epson", model="Epson", name="Epson projector", via_device=(DOMAIN, self._unique_id), ) @property def name(self): """Return the name of the device.""" return self._name @property def unique_id(self): """Return unique ID.""" return self._unique_id @property def state(self): """Return the state of the device.""" return self._state @property def available(self): """Return if projector is available.""" return self._available async def async_turn_on(self): """Turn on epson.""" if self._state == STATE_OFF: await self._projector.send_command(TURN_ON) async def async_turn_off(self): """Turn off epson.""" if self._state == STATE_ON: await self._projector.send_command(TURN_OFF) @property def source_list(self): """List of available input sources.""" return self._source_list @property def source(self): """Get current input sources.""" return self._source @property def volume_level(self): """Return the volume level of the media player (0..1).""" return self._volume async def select_cmode(self, cmode): """Set color mode in Epson.""" await self._projector.send_command(CMODE_LIST_SET[cmode]) async def async_select_source(self, source): """Select input source.""" selected_source = INV_SOURCES[source] await self._projector.send_command(selected_source) async def async_mute_volume(self, mute): """Mute (true) or unmute (false) sound.""" await self._projector.send_command(MUTE) async def async_volume_up(self): """Increase volume.""" await self._projector.send_command(VOL_UP) async def async_volume_down(self): """Decrease volume.""" await self._projector.send_command(VOL_DOWN) async def async_media_play(self): """Play media via Epson.""" await self._projector.send_command(PLAY) async def async_media_pause(self): """Pause media via Epson.""" await self._projector.send_command(PAUSE) async def async_media_next_track(self): """Skip to next.""" await self._projector.send_command(FAST) async def async_media_previous_track(self): """Skip to previous.""" await self._projector.send_command(BACK) @property def extra_state_attributes(self): """Return device specific state attributes.""" if self._cmode is None: return {} return {ATTR_CMODE: self._cmode}
registry.async_update_entity(old_entity_id, new_unique_id=uid)
vel_publisher.py
#! /usr/bin/python from RPi import GPIO import signal import rospy from geometry_msgs.msg import Twist clk1 = 13 #left wheel dt1 = 6 clk2 = 19 #right Wheel dt2 = 26 def keyboardInterruptHandler(signal, frame): print("this is velocity publisher signing off ...") exit(0) GPIO.setmode(GPIO.BCM) GPIO.setup(clk1, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) GPIO.setup(dt1, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) GPIO.setup(clk2, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) GPIO.setup(dt2, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) pub = rospy.Publisher("g_cmd_vel",Twist,queue_size = 10) rospy.init_node("encoders_data") vel = Twist() vel.linear.x = 0 vel.linear.y = 0 vel.linear.z = 0 vel.angular.x = 0 vel.angular.y = 0 vel.angular.z = 0 previous_time_l = rospy.get_time() previous_time_r = rospy.get_time() left_vel = 0 right_vel = 0 left_dist = 0 right_dist = 0 dl = 0 dr = 0 left_dist_prev = 0
# Register the Ctrl-C handler once, before entering the polling loop.
signal.signal(signal.SIGINT, keyboardInterruptHandler)

try:
    while True:
        clkState1 = GPIO.input(clk1)
        clkState2 = GPIO.input(clk2)
        dtState1 = GPIO.input(dt1)
        dtState2 = GPIO.input(dt2)
        present_time = rospy.get_time()
        dtl = (present_time - previous_time_l)
        dtr = (present_time - previous_time_r)
        if clkState1 != clkLastState1:
            if dtState1 != clkState1:
                left_dist += 0.0055
            else:
                left_dist += -0.0055
        if clkState2 != clkLastState2:
            if dtState2 != clkState2:
                right_dist += 0.0055
            else:
                right_dist += -0.0055
        dl = left_dist - left_dist_prev
        dr = right_dist - right_dist_prev
        if abs(dtl) > 0.1 and abs(dtr) > 0.1:
            left_vel = (left_dist - left_dist_prev) / dtl
            left_dist_prev = left_dist
            previous_time_l = present_time
            right_vel = (right_dist - right_dist_prev) / dtr
            right_dist_prev = right_dist
            previous_time_r = present_time
        clkLastState1 = clkState1
        clkLastState2 = clkState2
        vel.linear.x = (left_vel + right_vel) / 2
        vel.angular.z = (right_vel - left_vel) / 0.195
        vel.linear.x = round(vel.linear.x, 2)
        vel.angular.z = round(vel.angular.z, 2)
        pub.publish(vel)
        rospy.loginfo(vel)
finally:
    GPIO.cleanup()
right_dist_prev = 0 clkLastState1 = GPIO.input(clk1) clkLastState2 = GPIO.input(clk2)
trans_helper.go
package command import ( "etym/pkg/db/model" "html" "regexp" "strings" ) type CombineResult struct { Original string Translation string Final int } func
(sentences []*model.Sentence) CombineResult {
	var pivot = -1
	for i := range sentences {
		sentences[i].Trans = strings.TrimSpace(html.UnescapeString(sentences[i].Trans))
		sentences[i].Orig = strings.TrimSpace(html.UnescapeString(sentences[i].Orig))
		if strings.HasPrefix(sentences[i].Orig, "Related:") {
			pivot = i
		}
	}

	// Related words are left untranslated.
	if pivot >= 0 {
		var orig []string
		for j := pivot; j < len(sentences); j++ {
			orig = append(orig, sentences[j].Orig)
		}
		origstr := strings.Join(orig, " ")
		sentences = sentences[:pivot+1]
		sentences[pivot].Orig = origstr
		sentences[pivot].Trans = origstr
	}

	var origs []string
	var trans []string
	for _, s := range sentences {
		origs = append(origs, s.Orig)
		trans = append(trans, s.Trans)
	}
	combineOrigs := strings.Join(origs, " ")
	combineTrans := strings.Join(trans, " ")

	final := 0
	// Entries starting with "see" are not machine translated.
	if strings.HasPrefix(combineOrigs, "see") {
		combineTrans = strings.Replace(combineOrigs, "see", "参阅", 1)
		final = 1
	} else if strings.HasPrefix(combineOrigs, "See") {
		combineTrans = strings.Replace(combineOrigs, "See", "参阅", 1)
		final = 2
	} else {
		// Cross references of the form "(see xxxxx)" are kept untranslated.
		seeOrigRE := regexp.MustCompile(`\(see\s.*?(\(\w+\.?.*?\))?.*?\)`)
		seeTranRE := regexp.MustCompile(`(.*?(((\w+)。(.*?)))?.*?)`)
		if xx := seeOrigRE.FindAll([]byte(combineOrigs), -1); len(xx) > 0 {
			indexes := seeTranRE.FindAllIndex([]byte(combineTrans), -1)
			if len(indexes) != len(xx) {
				// Conservative strategy: in this case just keep the original text.
				//combineTrans = combineOrigs
			} else {
				// Splice the untranslated "(see ...)" fragments back into the
				// translation, copying the text between matches exactly once.
				lastIndex := indexes[len(indexes)-1][1]
				var trans string
				prev := 0
				for j, index := range indexes {
					trans += combineTrans[prev:index[0]]
					trans += string(xx[j])
					prev = index[1]
				}
				trans += combineTrans[lastIndex:]
				combineTrans = trans
			}
		}

		// Part-of-speech markers are not translated: Google renders "(n.)" as "(年.)".
		origRE := regexp.MustCompile(`\(\w+\..*?\)`)
		tranRE := regexp.MustCompile(`((\w+)。(.*?))`)
		if x := origRE.FindString(combineOrigs); len(x) > 0 {
			combineTrans = tranRE.ReplaceAllString(combineTrans, "($1.$2)")
		}
	}
	return CombineResult{Original: combineOrigs, Translation: combineTrans, Final: final}
}
combineSentences
bitcoin_ro_RO.ts
<TS language="ro_RO" version="2.1"> <context> <name>AddressBookPage</name> <message> <source>Right-click to edit address or label</source> <translation>Click-dreapta pentru a edita adresa sau eticheta</translation> </message> <message> <source>Create a new address</source> <translation>Creează o adresă nouă</translation> </message> <message> <source>&amp;New</source> <translation>&amp;Nou</translation> </message> <message> <source>Copy the currently selected address to the system clipboard</source> <translation>Copiază adresa selectată în clipboard</translation> </message> <message> <source>&amp;Copy</source> <translation>&amp;Copiază</translation> </message> <message> <source>C&amp;lose</source> <translation>Î&amp;nchide</translation> </message> <message> <source>Delete the currently selected address from the list</source> <translation>Şterge adresa selectată din listă</translation> </message> <message> <source>Enter address or label to search</source> <translation>Introduceţi adresa sau eticheta pentru căutare</translation> </message> <message> <source>Export the data in the current tab to a file</source> <translation>Exportă datele din tab-ul curent într-un fişier</translation> </message> <message> <source>&amp;Export</source> <translation>&amp;Exportă</translation> </message> <message> <source>&amp;Delete</source> <translation>&amp;Şterge</translation> </message> <message> <source>Choose the address to send coins to</source> <translation>Alege $adresa unde să trimiteţi monede</translation> </message> <message> <source>Choose the address to receive coins with</source> <translation>Alege adresa la care sa primesti monedele cu</translation> </message> <message> <source>C&amp;hoose</source> <translation>A&amp;lege</translation> </message> <message> <source>Sending addresses</source> <translation>Adresa de trimitere</translation> </message> <message> <source>Receiving addresses</source> <translation>Adresa de primire</translation> </message> <message> <source>These are your Pottercoin addresses for sending payments. Always check the amount and the receiving address before sending coins.</source> <translation>Acestea sunt adresele tale Pottercoin pentru efectuarea platilor. Intotdeauna verifica atent suma de plata si adresa beneficiarului inainte de a trimite monede.</translation> </message> <message> <source>These are your Pottercoin addresses for receiving payments. It is recommended to use a new receiving address for each transaction.</source> <translation>Acestea sunt adresele tale Pottercoin pentru receptionarea platilor. Este recomandat sa folosesti mereu o adresa noua pentru primirea platilor.</translation> </message> <message> <source>&amp;Copy Address</source> <translation>&amp;Copiază Adresa</translation> </message> <message> <source>Copy &amp;Label</source> <translation>Copiaza si eticheteaza</translation> </message> <message> <source>&amp;Edit</source> <translation>&amp;Editare</translation> </message> <message> <source>Export Address List</source> <translation>Exportă listă de adrese</translation> </message> <message> <source>Comma separated file (*.csv)</source> <translation>Fisier cu separator virgulă (*.csv)</translation> </message> <message> <source>Exporting Failed</source> <translation>Export nereusit</translation> </message> <message> <source>There was an error trying to save the address list to %1. Please try again.</source> <translation>A apărut o eroare la salvarea listei de adrese la %1. 
Vă rugăm să încercaţi din nou.</translation> </message> </context> <context> <name>AddressTableModel</name> <message> <source>Label</source> <translation>Etichetă</translation> </message> <message> <source>Address</source> <translation>Adresă</translation> </message> <message> <source>(no label)</source> <translation>(fără etichetă)</translation> </message> </context> <context> <name>AskPassphraseDialog</name> <message> <source>Passphrase Dialog</source> <translation>Dialogul pentru fraza de acces</translation> </message> <message> <source>Enter passphrase</source> <translation>Introduceţi fraza de acces</translation> </message> <message> <source>New passphrase</source> <translation>Frază de acces nouă</translation> </message> <message> <source>Repeat new passphrase</source> <translation>Repetaţi noua frază de acces</translation> </message> <message> <source>Show password</source> <translation>Arata parola</translation> </message> <message> <source>Enter the new passphrase to the wallet.&lt;br/&gt;Please use a passphrase of &lt;b&gt;ten or more random characters&lt;/b&gt;, or &lt;b&gt;eight or more words&lt;/b&gt;.</source> <translation>Introduceţi noua parolă a portofelului electronic.&lt;br/&gt;Vă rugăm să folosiţi o parolă de&lt;b&gt;minimum 10 caractere aleatoare&lt;/b&gt;, sau &lt;b&gt;minimum 8 cuvinte&lt;/b&gt;.</translation> </message> <message> <source>Encrypt wallet</source> <translation>Criptare portofel</translation> </message> <message> <source>This operation needs your wallet passphrase to unlock the wallet.</source> <translation>Această acţiune necesită introducerea parolei de acces pentru deblocarea portofelului.</translation> </message> <message> <source>Unlock wallet</source> <translation>Deblocare portofel</translation> </message> <message> <source>This operation needs your wallet passphrase to decrypt the wallet.</source> <translation>Această acţiune necesită introducerea parolei de acces pentru decriptarea portofelului.</translation> </message> <message> <source>Decrypt wallet</source> <translation>Decriptare portofel</translation> </message> <message> <source>Change passphrase</source> <translation>Schimbă parola</translation> </message> <message> <source>Enter the old passphrase and new passphrase to the wallet.</source> <translation>Introduceţi vechea şi noua parolă pentru portofel.</translation> </message> <message> <source>Confirm wallet encryption</source> <translation>Confirmaţi criptarea portofelului</translation> </message> <message> <source>Warning: If you encrypt your wallet and lose your passphrase, you will &lt;b&gt;LOSE ALL OF YOUR LITECOINS&lt;/b&gt;!</source> <translation>Atenţie: Dacă va criptati portofelul si ulterior pierdeti parola, &lt;b&gt;VEŢI PIERDE TOTI LITECOINII&lt;/b&gt;!</translation> </message> <message> <source>Are you sure you wish to encrypt your wallet?</source> <translation>Sigur doriţi să criptaţi portofelul dvs.?</translation> </message> <message> <source>Wallet encrypted</source> <translation>Portofel criptat</translation> </message> <message> <source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source> <translation>IMPORTANT: Orice copie de siguranţă făcută anterior portofelului dumneavoastră ar trebui înlocuită cu cea generată cel mai recent, fişier criptat al portofelului. 
Pentru siguranţă, copiile de siguranţă vechi ale portofelului ne-criptat vor deveni inutile imediat ce veţi începe folosirea noului fişier criptat al portofelului.</translation> </message> <message> <source>Wallet encryption failed</source> <translation>Criptarea portofelului a eşuat.</translation> </message> <message> <source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source> <translation>Criptarea portofelului nu a reuşit din cauza unei erori interne. Portofelul dvs. nu a fost criptat.</translation> </message> <message> <source>The supplied passphrases do not match.</source> <translation>Parolele furnizate nu se potrivesc.</translation> </message> <message> <source>Wallet unlock failed</source> <translation>Deblocarea portofelului a esuat.</translation> </message> <message> <source>The passphrase entered for the wallet decryption was incorrect.</source> <translation>Parola introdusă pentru decriptarea portofelului a fost incorectă.</translation> </message> <message> <source>Wallet decryption failed</source> <translation>Decriptarea portofelului a esuat.</translation> </message> <message> <source>Wallet passphrase was successfully changed.</source> <translation>Parola portofelului a fost schimbata.</translation> </message> <message> <source>Warning: The Caps Lock key is on!</source> <translation>Atenţie! Caps Lock este pornit!</translation> </message> </context> <context> <name>BanTableModel</name> <message> <source>IP/Netmask</source> <translation>IP/Netmask</translation> </message> <message> <source>Banned Until</source> <translation>Banat până la</translation> </message> </context> <context> <name>BitcoinGUI</name> <message> <source>Sign &amp;message...</source> <translation>Semnează &amp;mesaj...</translation> </message> <message> <source>Synchronizing with network...</source> <translation>Se sincronizează cu reţeaua...</translation> </message> <message> <source>&amp;Overview</source> <translation>&amp;Imagine de ansamblu</translation> </message> <message> <source>Show general overview of wallet</source> <translation>Arată o stare generală de ansamblu a portofelului</translation> </message> <message> <source>&amp;Transactions</source> <translation>&amp;Tranzacţii</translation> </message> <message> <source>Browse transaction history</source> <translation>Răsfoire istoric tranzacţii</translation> </message> <message> <source>E&amp;xit</source> <translation>Ieşire</translation> </message> <message> <source>Quit application</source> <translation>Închide aplicaţia</translation> </message> <message> <source>&amp;About %1</source> <translation>&amp;Despre %1</translation> </message> <message> <source>Show information about %1</source> <translation>Arată informaţii despre %1</translation> </message> <message> <source>About &amp;Qt</source> <translation>Despre &amp;Qt</translation> </message> <message> <source>Show information about Qt</source> <translation>Arată informaţii despre Qt</translation> </message> <message> <source>&amp;Options...</source> <translation>&amp;Opţiuni...</translation> </message> <message> <source>Modify configuration options for %1</source> <translation>Modifică opţiunile de configurare pentru %1</translation> </message> <message> <source>&amp;Encrypt Wallet...</source> <translation>Cript&amp;ează portofelul...</translation> </message> <message> <source>&amp;Backup Wallet...</source> <translation>Face o copie de siguranţă a portofelului...</translation> </message> <message> <source>&amp;Change Passphrase...</source> 
<translation>S&amp;chimbă parola...</translation> </message> <message> <source>Open &amp;URI...</source> <translation>Deschide &amp;URI...</translation> </message> <message> <source>Wallet:</source> <translation>Portofel:</translation> </message> <message> <source>Click to disable network activity.</source> <translation>Click pentru a opri activitatea retelei.</translation> </message> <message> <source>Network activity disabled.</source> <translation>Activitatea retelei a fost oprita.</translation> </message> <message> <source>Click to enable network activity again.</source> <translation>Click pentu a porni activitatea retelei.</translation> </message> <message> <source>Syncing Headers (%1%)...</source> <translation>Se sincronizeaza Header-ele (%1%)...</translation> </message> <message> <source>Reindexing blocks on disk...</source> <translation>Se reindexează blocurile pe disc...</translation> </message> <message> <source>Proxy is &lt;b&gt;enabled&lt;/b&gt;: %1</source> <translation>Proxy este&lt;b&gt;activat&lt;/b&gt;:%1</translation> </message> <message> <source>Send coins to a Pottercoin address</source> <translation>Trimite monede către o adresă Pottercoin</translation> </message> <message> <source>Backup wallet to another location</source> <translation>Creează o copie de rezervă a portofelului într-o locaţie diferită</translation> </message> <message> <source>Change the passphrase used for wallet encryption</source> <translation>Schimbă fraza de acces folosită pentru criptarea portofelului</translation> </message> <message> <source>&amp;Debug window</source> <translation>Fereastra de &amp;depanare</translation> </message> <message> <source>Open debugging and diagnostic console</source> <translation>Deschide consola de depanare şi diagnosticare</translation> </message> <message> <source>&amp;Verify message...</source> <translation>&amp;Verifică mesaj...</translation> </message> <message> <source>Pottercoin</source> <translation>Pottercoin</translation> </message> <message> <source>&amp;Send</source> <translation>Trimite</translation> </message> <message> <source>&amp;Receive</source> <translation>P&amp;rimeşte</translation> </message> <message> <source>&amp;Show / Hide</source> <translation>Arată/Ascunde</translation> </message> <message> <source>Show or hide the main Window</source> <translation>Arată sau ascunde fereastra principală</translation> </message> <message> <source>Encrypt the private keys that belong to your wallet</source> <translation>Criptează cheile private ale portofelului dvs.</translation> </message> <message> <source>Sign messages with your Pottercoin addresses to prove you own them</source> <translation>Semnaţi mesaje cu adresa dvs. 
Pottercoin pentru a dovedi că vă aparţin</translation> </message> <message> <source>Verify messages to ensure they were signed with specified Pottercoin addresses</source> <translation>Verificaţi mesaje pentru a vă asigura că au fost semnate cu adresa Pottercoin specificată</translation> </message> <message> <source>&amp;File</source> <translation>&amp;Fişier</translation> </message> <message> <source>&amp;Settings</source> <translation>&amp;Setări</translation> </message> <message> <source>&amp;Help</source> <translation>A&amp;jutor</translation> </message> <message> <source>Tabs toolbar</source> <translation>Bara de unelte</translation> </message> <message> <source>Request payments (generates QR codes and pottercoin: URIs)</source> <translation>Cereţi plăţi (generează coduri QR şi pottercoin-uri: URls)</translation> </message> <message> <source>Show the list of used sending addresses and labels</source> <translation>Arată lista de adrese trimise şi etichetele folosite.</translation> </message> <message> <source>Show the list of used receiving addresses and labels</source> <translation>Arată lista de adrese pentru primire şi etichetele</translation> </message> <message> <source>Open a pottercoin: URI or payment request</source> <translation>Deschidere pottercoin: o adresa URI sau o cerere de plată</translation> </message> <message> <source>&amp;Command-line options</source> <translation>Opţiuni linie de &amp;comandă</translation> </message> <message numerus="yes"> <source>%n active connection(s) to Pottercoin network</source> <translation><numerusform>%n conexiune activă către reţeaua Pottercoin</numerusform><numerusform>%n conexiuni active către reţeaua Pottercoin</numerusform><numerusform>%n de conexiuni active către reţeaua Pottercoin</numerusform></translation> </message> <message> <source>Indexing blocks on disk...</source> <translation>Se indexează blocurile pe disc...</translation> </message> <message> <source>Processing blocks on disk...</source> <translation>Se proceseaza blocurile pe disc...</translation> </message> <message numerus="yes"> <source>Processed %n block(s) of transaction history.</source> <translation><numerusform>S-a procesat %n bloc din istoricul tranzacţiilor.</numerusform><numerusform>S-au procesat %n blocuri din istoricul tranzacţiilor.</numerusform><numerusform>S-au procesat %n de blocuri din istoricul tranzacţiilor.</numerusform></translation> </message> <message> <source>%1 behind</source> <translation>%1 în urmă</translation> </message> <message> <source>Last received block was generated %1 ago.</source> <translation>Ultimul bloc recepţionat a fost generat acum %1.</translation> </message> <message> <source>Transactions after this will not yet be visible.</source> <translation>Tranzacţiile după aceasta nu vor fi vizibile încă.</translation> </message> <message> <source>Error</source> <translation>Eroare</translation> </message> <message> <source>Warning</source> <translation>Avertisment</translation> </message> <message> <source>Information</source> <translation>Informaţie</translation> </message> <message> <source>Up to date</source> <translation>Actualizat</translation> </message> <message> <source>Show the %1 help message to get a list with possible Pottercoin command-line options</source> <translation>Arată mesajul de ajutor %1 pentru a obţine o listă cu opţiunile posibile de linii de comandă Pottercoin</translation> </message> <message> <source>default wallet</source> <translation>portofel implicit</translation> </message> <message> 
<source>&amp;Window</source> <translation>&amp;Fereastră</translation> </message> <message> <source>Minimize</source> <translation>Minimizare</translation> </message> <message> <source>%1 client</source> <translation>Client %1</translation> </message> <message> <source>Connecting to peers...</source> <translation>Se conecteaza cu alte noduri...</translation> </message> <message> <source>Catching up...</source> <translation>Se actualizează...</translation> </message> <message> <source>Date: %1 </source> <translation>Data: %1 </translation> </message> <message> <source>Amount: %1 </source> <translation>Sumă: %1 </translation> </message> <message> <source>Wallet: %1 </source> <translation>Portofel: %1 </translation> </message> <message> <source>Type: %1 </source> <translation>Tip: %1 </translation> </message> <message> <source>Label: %1 </source> <translation>Etichetă: %1 </translation> </message> <message> <source>Address: %1 </source> <translation>Adresă: %1 </translation> </message> <message> <source>Sent transaction</source> <translation>Tranzacţie expediată</translation> </message> <message> <source>Incoming transaction</source> <translation>Tranzacţie recepţionată</translation> </message> <message> <source>HD key generation is &lt;b&gt;enabled&lt;/b&gt;</source> <translation>Generarea de chei HD este &lt;b&gt;activata&lt;/b&gt;</translation> </message> <message> <source>HD key generation is &lt;b&gt;disabled&lt;/b&gt;</source> <translation>Generarea de chei HD este &lt;b&gt;dezactivata&lt;/b&gt;</translation> </message> <message> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;unlocked&lt;/b&gt;</source> <translation>Portofelul este &lt;b&gt;criptat&lt;/b&gt; iar în momentul de faţă este &lt;b&gt;deblocat&lt;/b&gt;</translation> </message> <message> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;locked&lt;/b&gt;</source> <translation>Portofelul este &lt;b&gt;criptat&lt;/b&gt; iar în momentul de faţă este &lt;b&gt;blocat&lt;/b&gt;</translation> </message> <message> <source>A fatal error occurred. Pottercoin can no longer continue safely and will quit.</source> <translation>A survenit o eroare fatală. 
Pottercoin nu mai poate continua în siguranţă şi se va opri.</translation> </message> </context> <context> <name>CoinControlDialog</name> <message> <source>Coin Selection</source> <translation>Selectarea monedei</translation> </message> <message> <source>Quantity:</source> <translation>Cantitate:</translation> </message> <message> <source>Bytes:</source> <translation>Octeţi:</translation> </message> <message> <source>Amount:</source> <translation>Sumă:</translation> </message> <message> <source>Fee:</source> <translation>Taxă:</translation> </message> <message> <source>Dust:</source> <translation>Praf:</translation> </message> <message> <source>After Fee:</source> <translation>După taxă:</translation> </message> <message> <source>Change:</source> <translation>Rest:</translation> </message> <message> <source>(un)select all</source> <translation>(de)selectare tot</translation> </message> <message> <source>Tree mode</source> <translation>Mod arbore</translation> </message> <message> <source>List mode</source> <translation>Mod listă</translation> </message> <message> <source>Amount</source> <translation>Sumă</translation> </message> <message> <source>Received with label</source> <translation>Primite cu eticheta</translation> </message> <message> <source>Received with address</source> <translation>Primite cu adresa</translation> </message> <message> <source>Date</source> <translation>Data</translation> </message> <message> <source>Confirmations</source> <translation>Confirmări</translation> </message> <message> <source>Confirmed</source> <translation>Confirmat</translation> </message> <message> <source>Copy address</source> <translation>Copiază adresa</translation> </message> <message> <source>Copy label</source> <translation>Copiază eticheta</translation> </message> <message> <source>Copy amount</source> <translation>Copiază suma</translation> </message> <message> <source>Copy transaction ID</source> <translation>Copiază ID tranzacţie</translation> </message> <message> <source>Lock unspent</source> <translation>Blocare necheltuiţi</translation> </message> <message> <source>Unlock unspent</source> <translation>Deblocare necheltuiţi</translation> </message> <message> <source>Copy quantity</source> <translation>Copiază cantitatea</translation> </message> <message> <source>Copy fee</source> <translation>Copiază taxa</translation> </message> <message> <source>Copy after fee</source> <translation>Copiază după taxă</translation> </message> <message> <source>Copy bytes</source> <translation>Copiază octeţi</translation> </message> <message> <source>Copy dust</source> <translation>Copiază praf</translation> </message> <message> <source>Copy change</source> <translation>Copiază rest</translation> </message> <message> <source>yes</source> <translation>da</translation> </message> <message> <source>no</source> <translation>nu</translation> </message> <message> <source>This label turns red if any recipient receives an amount smaller than the current dust threshold.</source> <translation>Această etichetă devine roşie, dacă orice beneficiar primeşte o sumă mai mică decât pragul curent pentru praf.</translation> </message> <message> <source>Can vary +/- %1 satoshi(s) per input.</source> <translation>Poate varia +/- %1 satoshi pentru fiecare intrare.</translation> </message> <message> <source>(no label)</source> <translation>(fără etichetă)</translation> </message> <message> <source>change from %1 (%2)</source> <translation>restul de la %1 (%2)</translation> </message> <message> <source>(change)</source>
<translation>(rest)</translation> </message> </context> <context> <name>EditAddressDialog</name> <message> <source>Edit Address</source> <translation>Editează adresa</translation> </message> <message> <source>&amp;Label</source> <translation>&amp;Etichetă</translation> </message> <message> <source>The label associated with this address list entry</source> <translation>Eticheta asociată cu această intrare din lista de adrese</translation> </message> <message> <source>The address associated with this address list entry. This can only be modified for sending addresses.</source> <translation>Adresa asociată cu această intrare din lista de adrese. Aceasta poate fi modificată doar pentru adresele de trimitere.</translation> </message> <message> <source>&amp;Address</source> <translation>&amp;Adresă</translation> </message> <message> <source>New sending address</source> <translation>Noua adresă de trimitere</translation> </message> <message> <source>Edit receiving address</source> <translation>Editează adresa de primire</translation> </message> <message> <source>Edit sending address</source> <translation>Editează adresa de trimitere</translation> </message> <message> <source>The entered address "%1" is not a valid Pottercoin address.</source> <translation>Adresa introdusă "%1" nu este o adresă Pottercoin validă.</translation> </message> <message> <source>Address "%1" already exists as a receiving address with label "%2" and so cannot be added as a sending address.</source> <translation>Adresa "%1" exista deja ca si adresa de primire cu eticheta "%2" si deci nu poate fi folosita ca si adresa de trimitere.</translation> </message> <message> <source>The entered address "%1" is already in the address book with label "%2".</source> <translation>Adresa introdusa "%1" este deja in lista de adrese cu eticheta "%2".</translation> </message> <message> <source>Could not unlock wallet.</source> <translation>Portofelul nu a putut fi deblocat.</translation> </message> <message> <source>New key generation failed.</source> <translation>Generarea noii chei nu a reuşit.</translation> </message> </context> <context> <name>FreespaceChecker</name> <message> <source>A new data directory will be created.</source> <translation>Va fi creat un nou dosar de date.</translation> </message> <message> <source>name</source> <translation>nume</translation> </message> <message> <source>Directory already exists. Add %1 if you intend to create a new directory here.</source> <translation>Dosarul există deja.
Adaugă %1 dacă intenţionaţi să creaţi un nou dosar aici.</translation> </message> <message> <source>Path already exists, and is not a directory.</source> <translation>Calea deja există şi nu este un dosar.</translation> </message> <message> <source>Cannot create data directory here.</source> <translation>Nu se poate crea un dosar de date aici.</translation> </message> </context> <context> <name>HelpMessageDialog</name> <message> <source>version</source> <translation>versiunea</translation> </message> <message> <source>(%1-bit)</source> <translation>(%1-bit)</translation> </message> <message> <source>About %1</source> <translation>Despre %1</translation> </message> <message> <source>Command-line options</source> <translation>Opţiuni linie de comandă</translation> </message> </context> <context> <name>Intro</name> <message> <source>Welcome</source> <translation>Bun venit</translation> </message> <message> <source>Welcome to %1.</source> <translation>Bun venit la %1!</translation> </message> <message> <source>As this is the first time the program is launched, you can choose where %1 will store its data.</source> <translation>Deoarece este prima lansare a programului poți alege unde %1 va stoca datele sale.</translation> </message> <message> <source>When you click OK, %1 will begin to download and process the full %4 block chain (%2GB) starting with the earliest transactions in %3 when %4 initially launched.</source> <translation>Cand apasati OK, %1 va incepe descarcarea si procesarea intregului %4 blockchain (%2GB) incepand cu cele mai vechi tranzactii din %3 de la lansarea initiala a %4.</translation> </message> <message> <source>This initial synchronisation is very demanding, and may expose hardware problems with your computer that had previously gone unnoticed. Each time you run %1, it will continue downloading where it left off.</source> <translation>Sincronizarea initiala necesita foarte multe resurse, si poate releva probleme de hardware ale computerului care anterior au trecut neobservate. 
De fiecare data cand rulati %1, descarcarea va continua de unde a fost intrerupta.</translation> </message> <message> <source>If you have chosen to limit block chain storage (pruning), the historical data must still be downloaded and processed, but will be deleted afterward to keep your disk usage low.</source> <translation>Daca ati ales o limita pentru capacitatea de stocare a blockchainului (pruning), datele mai vechi tot trebuie sa fie descarcate si procesate, insa vor fi sterse ulterior pentru a reduce utilizarea harddiskului.</translation> </message> <message> <source>Use the default data directory</source> <translation>Foloseşte dosarul de date implicit</translation> </message> <message> <source>Use a custom data directory:</source> <translation>Foloseşte un dosar de date personalizat:</translation> </message> <message> <source>Pottercoin</source> <translation>Pottercoin</translation> </message> <message> <source>At least %1 GB of data will be stored in this directory, and it will grow over time.</source> <translation>Cel putin %1 GB de date vor fi stocate in acest director, si aceasta valoare va creste in timp.</translation> </message> <message> <source>Approximately %1 GB of data will be stored in this directory.</source> <translation>Aproximativ %1 GB de date vor fi stocate in acest director.</translation> </message> <message> <source>%1 will download and store a copy of the Pottercoin block chain.</source> <translation>%1 va descarca si stoca o copie a blockchainului Pottercoin.</translation> </message> <message> <source>The wallet will also be stored in this directory.</source> <translation>Portofelul va fi de asemeni stocat in acest director.</translation> </message> <message> <source>Error: Specified data directory "%1" cannot be created.</source> <translation>Eroare: Directorul de date specificat "%1" nu poate fi creat.</translation> </message> <message> <source>Error</source> <translation>Eroare</translation> </message> <message numerus="yes"> <source>%n GB of free space available</source> <translation><numerusform>%n GB de spaţiu liber disponibil</numerusform><numerusform>%n GB de spaţiu liber disponibil</numerusform><numerusform>%n GB de spaţiu liber disponibil</numerusform></translation> </message> <message numerus="yes"> <source>(of %n GB needed)</source> <translation><numerusform>(din %n GB necesar)</numerusform><numerusform>(din %n GB necesari)</numerusform><numerusform>(din %n GB necesari)</numerusform></translation> </message> </context> <context> <name>ModalOverlay</name> <message> <source>Form</source> <translation>Form</translation> </message> <message> <source>Recent transactions may not yet be visible, and therefore your wallet's balance might be incorrect. This information will be correct once your wallet has finished synchronizing with the pottercoin network, as detailed below.</source> <translation>Tranzactiile recente pot sa nu fie inca vizibile, de aceea balanta portofelului poate fi incorecta.
Aceasta informatie va fi corecta de indata ce portofelul va fi complet sincronizat cu reteaua Pottercoin, asa cum este detaliat mai jos.</translation> </message> <message> <source>Attempting to spend pottercoins that are affected by not-yet-displayed transactions will not be accepted by the network.</source> <translation>Incercarea de a cheltui pottercoini care sunt afectati de tranzactii ce inca nu sunt afisate nu va fi acceptata de retea.</translation> </message> <message> <source>Number of blocks left</source> <translation>Numarul de blocuri ramase</translation> </message> <message> <source>Unknown...</source> <translation>Necunoscut...</translation> </message> <message> <source>Last block time</source> <translation>Data ultimului bloc</translation> </message> <message> <source>Progress</source> <translation>Progres</translation> </message> <message> <source>Progress increase per hour</source> <translation>Cresterea progresului per ora</translation> </message> <message> <source>calculating...</source> <translation>se calculeaza...</translation> </message> <message> <source>Estimated time left until synced</source> <translation>Timp estimat pana la sincronizare</translation> </message> <message> <source>Hide</source> <translation>Ascunde</translation> </message> </context> <context> <name>OpenURIDialog</name> <message> <source>Open URI</source> <translation>Deschide URI</translation> </message> <message> <source>Open payment request from URI or file</source> <translation>Deschideţi cerere de plată prin intermediul adresei URI sau a fişierului</translation> </message> <message> <source>URI:</source> <translation>URI:</translation> </message> <message> <source>Select payment request file</source> <translation>Selectaţi fişierul cerere de plată</translation> </message> <message> <source>Select payment request file to open</source> <translation>Selectati care fisier de cerere de plata va fi deschis</translation> </message> </context> <context> <name>OptionsDialog</name> <message> <source>Options</source> <translation>Opţiuni</translation> </message> <message> <source>&amp;Main</source> <translation>&amp;Principal</translation> </message> <message> <source>Automatically start %1 after logging in to the system.</source> <translation>Porneşte automat %1 după logarea in sistem.</translation> </message> <message> <source>&amp;Start %1 on system login</source> <translation>&amp;Porneste %1 la logarea in sistem</translation> </message> <message> <source>Size of &amp;database cache</source> <translation>Mărimea bazei de &amp;date cache</translation> </message> <message> <source>Number of script &amp;verification threads</source> <translation>Numărul de thread-uri de &amp;verificare</translation> </message> <message> <source>IP address of the proxy (e.g.
IPv4: 127.0.0.1 / IPv6: ::1)</source> <translation>Adresa IP a serverului proxy (de exemplu: IPv4: 127.0.0.1 / IPv6: ::1)</translation> </message> <message> <source>Shows if the supplied default SOCKS5 proxy is used to reach peers via this network type.</source> <translation>Arata daca proxy-ul SOCKS5 furnizat implicit este folosit pentru a gasi parteneri via acest tip de retea.</translation> </message> <message> <source>Use separate SOCKS&amp;5 proxy to reach peers via Tor hidden services:</source> <translation>Foloseste un proxy SOCKS&amp;5 separat pentru a gasi parteneri via servicii Tor ascunse:</translation> </message> <message> <source>Hide the icon from the system tray.</source> <translation>Ascunde icon-ul din system tray.</translation> </message> <message> <source>&amp;Hide tray icon</source> <translation>&amp;Ascunde icon-ul din system tray</translation> </message> <message> <source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Exit in the menu.</source> <translation>Minimizează fereastra în locul părăsirii programului în momentul închiderii ferestrei. Cînd această opţiune e activă, aplicaţia se va opri doar în momentul selectării comenzii 'Închide aplicaţia' din menu.</translation> </message> <message> <source>Third party URLs (e.g. a block explorer) that appear in the transactions tab as context menu items. %s in the URL is replaced by transaction hash. Multiple URLs are separated by vertical bar |.</source> <translation>URL-uri terţe părţi (de exemplu, un explorator de bloc), care apar în tab-ul tranzacţiilor ca elemente de meniu contextual. %s în URL este înlocuit cu hash de tranzacţie. URL-urile multiple sînt separate prin bară verticală |.</translation> </message> <message> <source>Open the %1 configuration file from the working directory.</source> <translation>Deschide fisierul de configurare %1 din directorul curent.</translation> </message> <message> <source>Open Configuration File</source> <translation>Deschide fisierul de configurare</translation> </message> <message> <source>Reset all client options to default.</source> <translation>Resetează toate setările clientului la valorile implicite.</translation> </message> <message> <source>&amp;Reset Options</source> <translation>&amp;Resetează opţiunile</translation> </message> <message> <source>&amp;Network</source> <translation>&amp;Reţea</translation> </message> <message> <source>Disables some advanced features but all blocks will still be fully validated. Reverting this setting requires re-downloading the entire blockchain. Actual disk usage may be somewhat higher.</source> <translation>Dezactiveaza unele caracteristici avansate insa toate blocurile vor fi validate pe deplin. Inversarea acestei setari necesita re-descarcarea intregului blockchain.
Utilizarea reala a discului poate fi ceva mai mare.</translation> </message> <message> <source>Prune &amp;block storage to</source> <translation>Redu stocarea &amp;blocurilor la </translation> </message> <message> <source>GB</source> <translation>GB</translation> </message> <message> <source>Reverting this setting requires re-downloading the entire blockchain.</source> <translation>Inversarea acestei setari necesita re-descarcarea intregului blockchain.</translation> </message> <message> <source>(0 = auto, &lt;0 = leave that many cores free)</source> <translation>(0 = automat, &lt;0 = lasă atîtea nuclee libere)</translation> </message> <message> <source>W&amp;allet</source> <translation>Portofel</translation> </message> <message> <source>Expert</source> <translation>Expert</translation> </message> <message> <source>Enable coin &amp;control features</source> <translation>Activare caracteristici de &amp;control ale monedei</translation> </message> <message> <source>If you disable the spending of unconfirmed change, the change from a transaction cannot be used until that transaction has at least one confirmation. This also affects how your balance is computed.</source> <translation>Dacă dezactivaţi cheltuirea restului neconfirmat, restul dintr-o tranzacţie nu poate fi folosit pînă cînd tranzacţia are cel puţin o confirmare. Aceasta afectează de asemenea calcularea soldului.</translation> </message> <message> <source>&amp;Spend unconfirmed change</source> <translation>&amp;Cheltuire rest neconfirmat</translation> </message> <message> <source>Automatically open the Pottercoin client port on the router. This only works when your router supports UPnP and it is enabled.</source> <translation>Deschide automat în router portul aferent clientului Pottercoin. Funcţionează doar dacă routerul suportă UPnP şi e activat.</translation> </message> <message> <source>Map port using &amp;UPnP</source> <translation>Mapare port folosind &amp;UPnP</translation> </message> <message> <source>Accept connections from outside.</source> <translation>Acceptă conexiuni din exterior.</translation> </message> <message> <source>Allow incomin&amp;g connections</source> <translation>Permite conexiuni de intrar&amp;e</translation> </message> <message> <source>Connect to the Pottercoin network through a SOCKS5 proxy.</source> <translation>Conectare la reţeaua Pottercoin printr-un proxy SOCKS5.</translation> </message> <message> <source>&amp;Connect through SOCKS5 proxy (default proxy):</source> <translation>&amp;Conectare printr-un proxy SOCKS5 (proxy implicit):</translation> </message> <message> <source>Proxy &amp;IP:</source> <translation>Proxy &amp;IP:</translation> </message> <message> <source>&amp;Port:</source> <translation>&amp;Port:</translation> </message> <message> <source>Port of the proxy (e.g.
9050)</source> <translation>Portul proxy (de exemplu: 9050)</translation> </message> <message> <source>Used for reaching peers via:</source> <translation>Folosit pentru a gasi parteneri via:</translation> </message> <message> <source>IPv4</source> <translation>IPv4</translation> </message> <message> <source>IPv6</source> <translation>IPv6</translation> </message> <message> <source>Tor</source> <translation>Tor</translation> </message> <message> <source>Connect to the Pottercoin network through a separate SOCKS5 proxy for Tor hidden services.</source> <translation>Conectare la reteaua Pottercoin printr-un proxy SOCKS5 separat pentru serviciile Tor ascunse.</translation> </message> <message> <source>&amp;Window</source> <translation>&amp;Fereastră</translation> </message> <message> <source>Show only a tray icon after minimizing the window.</source> <translation>Arată doar un icon în tray după minimizarea ferestrei.</translation> </message> <message> <source>&amp;Minimize to the tray instead of the taskbar</source> <translation>&amp;Minimizare în tray în loc de taskbar</translation> </message> <message> <source>M&amp;inimize on close</source> <translation>M&amp;inimizare fereastră în locul închiderii programului</translation> </message> <message> <source>&amp;Display</source> <translation>&amp;Afişare</translation> </message> <message> <source>User Interface &amp;language:</source> <translation>&amp;Limbă interfaţă utilizator:</translation> </message> <message> <source>The user interface language can be set here. This setting will take effect after restarting %1.</source> <translation>Limba interfeţei utilizatorului poate fi setată aici. Această setare va avea efect după repornirea %1.</translation> </message> <message> <source>&amp;Unit to show amounts in:</source> <translation>&amp;Unitatea de măsură pentru afişarea sumelor:</translation> </message> <message> <source>Choose the default subdivision unit to show in the interface and when sending coins.</source> <translation>Alegeţi subdiviziunea folosită la afişarea interfeţei şi la trimiterea de pottercoin.</translation> </message> <message> <source>Whether to show coin control features or not.</source> <translation>Arată sau nu caracteristicile de control al monedei.</translation> </message> <message> <source>&amp;Third party transaction URLs</source> <translation>URL-uri tranzacţii &amp;terţe părţi</translation> </message> <message> <source>&amp;OK</source> <translation>&amp;OK</translation> </message> <message> <source>&amp;Cancel</source> <translation>&amp;Renunţă</translation> </message> <message> <source>default</source> <translation>implicit</translation> </message> <message> <source>none</source> <translation>nimic</translation> </message> <message> <source>Confirm options reset</source> <translation>Confirmă resetarea opţiunilor</translation> </message> <message> <source>Client restart required to activate changes.</source> <translation>Este necesară repornirea clientului pentru a activa schimbările.</translation> </message> <message> <source>Client will be shut down. Do you want to proceed?</source> <translation>Clientul va fi închis. Doriţi să continuaţi?</translation> </message> <message> <source>Configuration options</source> <translation>Optiuni de configurare</translation> </message> <message> <source>The configuration file is used to specify advanced user options which override GUI settings.
Additionally, any command-line options will override this configuration file.</source> <translation>Fisierul de configurare e folosit pentru a specifica optiuni utilizator avansate care au prioritate fata de setarile din GUI. In plus, orice optiune din linia de comanda va avea prioritate fata de acest fisier de configurare.</translation> </message> <message> <source>Error</source> <translation>Eroare</translation> </message> <message> <source>The configuration file could not be opened.</source> <translation>Fisierul de configurare nu a putut fi deschis.</translation> </message> <message> <source>This change would require a client restart.</source> <translation>Această schimbare necesită o repornire a clientului.</translation> </message> <message> <source>The supplied proxy address is invalid.</source> <translation>Adresa proxy furnizată nu este validă.</translation> </message> </context> <context> <name>OverviewPage</name> <message> <source>Form</source> <translation>Form</translation> </message> <message> <source>The displayed information may be out of date. Your wallet automatically synchronizes with the Pottercoin network after a connection is established, but this process has not completed yet.</source> <translation>Informaţiile afişate pot fi neactualizate. Portofelul dvs. se sincronizează automat cu reţeaua Pottercoin după ce o conexiune este stabilită, dar acest proces nu a fost finalizat încă.</translation> </message> <message> <source>Watch-only:</source> <translation>Doar-supraveghere:</translation> </message> <message> <source>Available:</source> <translation>Disponibil:</translation> </message> <message> <source>Your current spendable balance</source> <translation>Balanţa dvs. curentă de cheltuieli</translation> </message> <message> <source>Pending:</source> <translation>În aşteptare:</translation> </message> <message> <source>Total of transactions that have yet to be confirmed, and do not yet count toward the spendable balance</source> <translation>Totalul tranzacţiilor care nu sunt confirmate încă şi care nu sunt încă adunate la balanţa de cheltuieli</translation> </message> <message> <source>Immature:</source> <translation>Nematurizat:</translation> </message> <message> <source>Mined balance that has not yet matured</source> <translation>Balanţa minată ce nu s-a maturizat încă</translation> </message> <message> <source>Balances</source> <translation>Balanţe</translation> </message> <message> <source>Total:</source> <translation>Total:</translation> </message> <message> <source>Your current total balance</source> <translation>Balanţa totală curentă</translation> </message> <message> <source>Your current balance in watch-only addresses</source> <translation>Soldul dvs. curent în adresele doar-supraveghere</translation> </message> <message> <source>Spendable:</source> <translation>Cheltuibil:</translation> </message> <message> <source>Recent transactions</source> <translation>Tranzacţii recente</translation> </message> <message> <source>Unconfirmed transactions to watch-only addresses</source> <translation>Tranzacţii neconfirmate la adresele doar-supraveghere</translation> </message> <message> <source>Mined balance in watch-only addresses that has not yet matured</source> <translation>Balanţă minată în adresele doar-supraveghere care nu s-a maturizat încă</translation> </message> <message> <source>Current total balance in watch-only addresses</source> <translation>Soldul dvs.
total în adresele doar-supraveghere</translation> </message> </context> <context> <name>PaymentServer</name> <message> <source>Payment request error</source> <translation>Eroare la cererea de plată</translation> </message> <message> <source>Cannot start pottercoin: click-to-pay handler</source> <translation>Pottercoin nu poate porni: click-to-pay handler</translation> </message> <message> <source>URI handling</source> <translation>Gestionare URI</translation> </message> <message> <source>'pottercoin://' is not a valid URI. Use 'pottercoin:' instead.</source> <translation>'pottercoin://' nu este un URI valid. Folositi 'pottercoin:' in loc.</translation> </message> <message> <source>Payment request fetch URL is invalid: %1</source> <translation>URL-ul cererii de plată preluat nu este valid: %1</translation> </message> <message> <source>Invalid payment address %1</source> <translation>Adresă pentru plată invalidă %1</translation> </message> <message> <source>URI cannot be parsed! This can be caused by an invalid Pottercoin address or malformed URI parameters.</source> <translation>URI nu poate fi analizat! Acest lucru poate fi cauzat de o adresă Pottercoin invalidă sau parametri URI deformaţi.</translation> </message> <message> <source>Payment request file handling</source> <translation>Manipulare fişier cerere de plată</translation> </message> <message> <source>Payment request file cannot be read! This can be caused by an invalid payment request file.</source> <translation>Fişierul cerere de plată nu poate fi citit! Cauza poate fi un fişier cerere de plată nevalid.</translation> </message> <message> <source>Payment request rejected</source> <translation>Cerere de plată refuzată</translation> </message> <message> <source>Payment request network doesn't match client network.</source> <translation>Reţeaua cererii de plată nu se potriveşte cu reţeaua clientului.</translation> </message> <message> <source>Payment request expired.</source> <translation>Cererea de plată a expirat.</translation> </message> <message> <source>Payment request is not initialized.</source> <translation>Cererea de plată nu este iniţializată.</translation> </message> <message> <source>Unverified payment requests to custom payment scripts are unsupported.</source> <translation>Cererile de plată neverificate către scripturi de plată personalizate nu sunt suportate.</translation> </message> <message> <source>Invalid payment request.</source> <translation>Cerere de plată invalidă.</translation> </message> <message> <source>Requested payment amount of %1 is too small (considered dust).</source> <translation>Suma cerută de plată de %1 este prea mică (considerată praf).</translation> </message> <message> <source>Refund from %1</source> <translation>Rambursare de la %1</translation> </message> <message> <source>Payment request %1 is too large (%2 bytes, allowed %3 bytes).</source> <translation>Cererea de plată %1 este prea mare (%2 octeţi, permis %3 octeţi).</translation> </message> <message> <source>Error communicating with %1: %2</source> <translation>Eroare la comunicarea cu %1: %2</translation> </message> <message> <source>Payment request cannot be parsed!</source> <translation>Cererea de plată nu poate fi analizată!</translation> </message> <message> <source>Bad response from server %1</source> <translation>Răspuns greşit de la server %1</translation> </message> <message> <source>Network request error</source> <translation>Eroare în cererea de reţea</translation> </message> <message> <source>Payment acknowledged</source>
<translation>Plată acceptată</translation> </message> </context> <context> <name>PeerTableModel</name> <message> <source>User Agent</source> <translation>Agent utilizator</translation> </message> <message> <source>Node/Service</source> <translation>Nod/Serviciu</translation> </message> <message> <source>NodeId</source> <translation>NodeId</translation> </message> <message> <source>Ping</source> <translation>Ping</translation> </message> <message> <source>Sent</source> <translation>Expediat</translation> </message> <message> <source>Received</source> <translation>Recepţionat</translation> </message> </context> <context> <name>QObject</name> <message> <source>Amount</source> <translation>Cantitate</translation> </message> <message> <source>Enter a Pottercoin address (e.g. %1)</source> <translation>Introduceţi o adresă Pottercoin (de exemplu %1)</translation> </message> <message> <source>%1 d</source> <translation>%1 z</translation> </message> <message> <source>%1 h</source> <translation>%1 h</translation> </message> <message> <source>%1 m</source> <translation>%1 m</translation> </message> <message> <source>%1 s</source> <translation>%1 s</translation> </message> <message> <source>None</source> <translation>Niciuna</translation> </message> <message> <source>N/A</source> <translation>N/A</translation> </message> <message> <source>%1 ms</source> <translation>%1 ms</translation> </message> <message numerus="yes"> <source>%n second(s)</source> <translation><numerusform>%n secunda</numerusform><numerusform>%n secunde</numerusform><numerusform>%n secunde</numerusform></translation> </message> <message numerus="yes"> <source>%n minute(s)</source> <translation><numerusform>%n minut</numerusform><numerusform>%n minute</numerusform><numerusform>%n minute</numerusform></translation> </message> <message numerus="yes"> <source>%n hour(s)</source> <translation><numerusform>%n ora</numerusform><numerusform>%n ore</numerusform><numerusform>%n ore</numerusform></translation> </message> <message numerus="yes"> <source>%n day(s)</source> <translation><numerusform>%n zi</numerusform><numerusform>%n zile</numerusform><numerusform>%n zile</numerusform></translation> </message> <message numerus="yes"> <source>%n week(s)</source> <translation><numerusform>%n saptamana</numerusform><numerusform>%n saptamani</numerusform><numerusform>%n saptamani</numerusform></translation> </message> <message> <source>%1 and %2</source> <translation>%1 şi %2</translation> </message> <message numerus="yes"> <source>%n year(s)</source> <translation><numerusform>%n an</numerusform><numerusform>%n ani</numerusform><numerusform>%n ani</numerusform></translation> </message> <message> <source>%1 B</source> <translation>%1 B</translation> </message> <message> <source>%1 KB</source> <translation>%1 KB</translation> </message> <message> <source>%1 MB</source> <translation>%1 MB</translation> </message> <message> <source>%1 GB</source> <translation>%1 GB</translation> </message> <message> <source>%1 didn't yet exit safely...</source> <translation>%1 nu s-a inchis inca in siguranta...</translation> </message> <message> <source>unknown</source> <translation>necunoscut</translation> </message> </context> <context> <name>QObject::QObject</name> <message> <source>Error parsing command line arguments: %1.</source> <translation>Eroare la analiza argumentelor din linia de comanda: %1.</translation> </message> <message> <source>Error: Specified data directory "%1" does not exist.</source> <translation>Eroare: Directorul de date specificat "%1" nu
există.</translation> </message> <message> <source>Error: Cannot parse configuration file: %1.</source> <translation>Eroare: Nu se poate analiza fişierul de configurare: %1.</translation> </message> <message> <source>Error: %1</source> <translation>Eroare: %1</translation> </message> </context> <context> <name>QRImageWidget</name> <message> <source>&amp;Save Image...</source> <translation>&amp;Salvează Imaginea...</translation> </message> <message> <source>&amp;Copy Image</source> <translation>&amp;Copiaza Imaginea</translation> </message> <message> <source>Save QR Code</source> <translation>Salvează codul QR</translation> </message> <message> <source>PNG Image (*.png)</source> <translation>Imagine de tip PNG (*.png)</translation> </message> </context> <context> <name>RPCConsole</name> <message> <source>N/A</source> <translation>Nespecificat</translation> </message> <message> <source>Client version</source> <translation>Versiune client</translation> </message> <message> <source>&amp;Information</source> <translation>&amp;Informaţii</translation> </message> <message> <source>Debug window</source> <translation>Fereastra de depanare</translation> </message> <message> <source>General</source> <translation>General</translation> </message> <message> <source>Using BerkeleyDB version</source> <translation>Foloseşte BerkeleyDB versiunea</translation> </message> <message> <source>Datadir</source> <translation>Dirdate</translation> </message> <message> <source>Startup time</source> <translation>Ora de pornire</translation> </message> <message> <source>Network</source> <translation>Reţea</translation> </message> <message> <source>Name</source> <translation>Nume</translation> </message> <message> <source>Number of connections</source> <translation>Numărul de conexiuni</translation> </message> <message> <source>Block chain</source> <translation>Lanţ de blocuri</translation> </message> <message> <source>Current number of blocks</source> <translation>Numărul curent de blocuri</translation> </message> <message> <source>Memory Pool</source> <translation>Pool Memorie</translation> </message> <message> <source>Current number of transactions</source> <translation>Numărul curent de tranzacţii</translation> </message> <message> <source>Memory usage</source> <translation>Memorie folosită</translation> </message> <message> <source>Wallet: </source> <translation>Portofel: </translation> </message> <message> <source>(none)</source> <translation>(nimic)</translation> </message> <message> <source>&amp;Reset</source> <translation>&amp;Resetare</translation> </message> <message> <source>Received</source> <translation>Recepţionat</translation> </message> <message> <source>Sent</source> <translation>Expediat</translation> </message> <message> <source>&amp;Peers</source> <translation>&amp;Parteneri</translation> </message> <message> <source>Banned peers</source> <translation>Parteneri banati</translation> </message> <message> <source>Select a peer to view detailed information.</source> <translation>Selectaţi un partener pentru a vedea informaţiile detaliate.</translation> </message> <message> <source>Whitelisted</source> <translation>Whitelisted</translation> </message> <message> <source>Direction</source> <translation>Direcţie</translation> </message> <message> <source>Version</source> <translation>Versiune</translation> </message> <message> <source>Starting Block</source> <translation>Bloc de început</translation> </message> <message> <source>Synced Headers</source> <translation>Headere Sincronizate</translation> </message>
<message> <source>Synced Blocks</source> <translation>Blocuri Sincronizate</translation> </message> <message> <source>User Agent</source> <translation>Agent utilizator</translation> </message> <message> <source>Open the %1 debug log file from the current data directory. This can take a few seconds for large log files.</source> <translation>Deschide fişierul jurnal depanare %1 din directorul curent. Aceasta poate dura cateva secunde pentru fişierele mai mari.</translation> </message> <message> <source>Decrease font size</source> <translation>Micsoreaza fontul</translation> </message> <message> <source>Increase font size</source> <translation>Mareste fontul</translation> </message> <message> <source>Services</source> <translation>Servicii</translation> </message> <message> <source>Ban Score</source> <translation>Scor Ban</translation> </message> <message> <source>Connection Time</source> <translation>Timp conexiune</translation> </message> <message> <source>Last Send</source> <translation>Ultima trimitere</translation> </message> <message> <source>Last Receive</source> <translation>Ultima primire</translation> </message> <message> <source>Ping Time</source> <translation>Timp ping</translation> </message> <message> <source>The duration of a currently outstanding ping.</source> <translation>Durata unui ping aflat in curs.</translation> </message> <message> <source>Ping Wait</source> <translation>Asteptare ping</translation> </message> <message> <source>Min Ping</source> <translation>Min Ping</translation> </message> <message> <source>Time Offset</source> <translation>Diferenta timp</translation> </message> <message> <source>Last block time</source> <translation>Data ultimului bloc</translation> </message> <message> <source>&amp;Open</source> <translation>&amp;Deschide</translation> </message> <message> <source>&amp;Console</source> <translation>&amp;Consolă</translation> </message> <message> <source>&amp;Network Traffic</source> <translation>Trafic &amp;reţea</translation> </message> <message> <source>Totals</source> <translation>Totaluri</translation> </message> <message> <source>In:</source> <translation>Intrare:</translation> </message> <message> <source>Out:</source> <translation>Ieşire:</translation> </message> <message> <source>Debug log file</source> <translation>Fişier jurnal depanare</translation> </message> <message> <source>Clear console</source> <translation>Curăţă consola</translation> </message> <message> <source>1 &amp;hour</source> <translation>1 &amp;oră</translation> </message> <message> <source>1 &amp;day</source> <translation>1 &amp;zi</translation> </message> <message> <source>1 &amp;week</source> <translation>1 &amp;săptămână</translation> </message> <message> <source>1 &amp;year</source> <translation>1 &amp;an</translation> </message> <message> <source>&amp;Disconnect</source> <translation>&amp;Deconectare</translation> </message> <message> <source>Ban for</source> <translation>Interzicere pentru</translation> </message> <message> <source>&amp;Unban</source> <translation>&amp;Unban</translation> </message> <message> <source>Welcome to the %1 RPC console.</source> <translation>Bun venit la consola %1 RPC.</translation> </message> <message> <source>Use up and down arrows to navigate history, and %1 to clear screen.</source> <translation>Folosiţi săgeţile sus şi jos pentru a naviga în istoric şi %1 pentru a curăţa ecranul.</translation> </message> <message> <source>Type %1 for an overview of available commands.</source> <translation>Tastati %1 pentru o recapitulare a comenzilor
disponibile.</translation> </message> <message> <source>For more information on using this console type %1.</source> <translation>Pentru mai multe informatii despre folosirea acestei console tastati %1.</translation> </message> <message> <source>WARNING: Scammers have been active, telling users to type commands here, stealing their wallet contents. Do not use this console without fully understanding the ramifications of a command.</source> <translation>ATENTIONARE: Sunt escroci care instruiesc userii sa introduca aici comenzi, pentru a le fura continutul portofelelor. Nu folositi aceasta consola fara a intelege pe deplin ramificatiile unei comenzi.</translation> </message>
<message> <source>Executing command without any wallet</source> <translation>Executarea comenzii fara niciun portofel</translation> </message> <message> <source>Executing command using "%1" wallet</source> <translation>Executarea comenzii folosind portofelul "%1"</translation> </message> <message> <source>(node id: %1)</source> <translation>(node id: %1)</translation> </message> <message> <source>via %1</source> <translation>via %1</translation> </message> <message> <source>never</source> <translation>niciodată</translation> </message> <message> <source>Inbound</source> <translation>Intrare</translation> </message> <message> <source>Outbound</source> <translation>Ieşire</translation> </message> <message> <source>Yes</source> <translation>Da</translation> </message> <message> <source>No</source> <translation>Nu</translation> </message> <message> <source>Unknown</source> <translation>Necunoscut</translation> </message> </context> <context> <name>ReceiveCoinsDialog</name> <message> <source>&amp;Amount:</source> <translation>Sum&amp;a:</translation> </message> <message> <source>&amp;Label:</source> <translation>&amp;Etichetă:</translation> </message> <message> <source>&amp;Message:</source> <translation>&amp;Mesaj:</translation> </message> <message> <source>An optional message to attach to the payment request, which will be displayed when the request is opened. Note: The message will not be sent with the payment over the Pottercoin network.</source> <translation>Un mesaj opţional de ataşat la cererea de plată, care va fi afişat cînd cererea este deschisă. Notă: Acest mesaj nu va fi trimis cu plata către reţeaua Pottercoin.</translation> </message> <message> <source>An optional label to associate with the new receiving address.</source> <translation>O etichetă opţională de asociat cu noua adresă de primire.</translation> </message> <message> <source>Use this form to request payments. All fields are &lt;b&gt;optional&lt;/b&gt;.</source> <translation>Foloseşte acest formular pentru a solicita plăţi. Toate cîmpurile sînt &lt;b&gt;opţionale&lt;/b&gt;.</translation> </message> <message> <source>An optional amount to request. Leave this empty or zero to not request a specific amount.</source> <translation>O sumă opţională de cerut. Lăsaţi gol sau zero pentru a nu cere o sumă anume.</translation> </message> <message> <source>Clear all fields of the form.</source> <translation>Curăţă toate cîmpurile formularului.</translation> </message> <message> <source>Clear</source> <translation>Curăţă</translation> </message> <message> <source>Native segwit addresses (aka Bech32 or BIP-173) reduce your transaction fees later on and offer better protection against typos, but old wallets don't support them. When unchecked, an address compatible with older wallets will be created instead.</source> <translation>Adresele native segwit (aka Bech32 sau BIP-173) vor reduce mai tarziu comisioanele de tranzactionare si vor oferi o mai buna protectie impotriva introducerii gresite, dar portofelele vechi nu sunt compatibile.
Daca optiunea nu e bifata, se va crea o adresa compatibila cu portofelele vechi.</translation> </message> <message> <source>Generate native segwit (Bech32) address</source> <translation>Genereaza adresa nativa segwit (Bech32)</translation> </message> <message> <source>Requested payments history</source> <translation>Istoricul plăţilor cerute</translation> </message> <message> <source>&amp;Request payment</source> <translation>&amp;Cerere plată</translation> </message> <message> <source>Show the selected request (does the same as double clicking an entry)</source> <translation>Arată cererea selectată (acelaşi lucru ca şi dublu-clic pe o înregistrare)</translation> </message> <message> <source>Show</source> <translation>Arată</translation> </message> <message> <source>Remove the selected entries from the list</source> <translation>Înlătură intrările selectate din listă</translation> </message> <message> <source>Remove</source> <translation>Înlătură</translation> </message> <message> <source>Copy URI</source> <translation>Copiază URI</translation> </message> <message> <source>Copy label</source> <translation>Copiază eticheta</translation> </message> <message> <source>Copy message</source> <translation>Copiază mesajul</translation> </message> <message> <source>Copy amount</source> <translation>Copiază suma</translation> </message> </context> <context> <name>ReceiveRequestDialog</name> <message> <source>QR Code</source> <translation>Cod QR</translation> </message> <message> <source>Copy &amp;URI</source> <translation>Copiază &amp;URI</translation> </message> <message> <source>Copy &amp;Address</source> <translation>Copiază &amp;adresa</translation> </message> <message> <source>&amp;Save Image...</source> <translation>&amp;Salvează imaginea...</translation> </message> <message> <source>Request payment to %1</source> <translation>Cere plata pentru %1</translation> </message> <message> <source>Payment information</source> <translation>Informaţii plată</translation> </message> <message> <source>URI</source> <translation>URI</translation> </message> <message> <source>Address</source> <translation>Adresă</translation> </message> <message> <source>Amount</source> <translation>Cantitate</translation> </message> <message> <source>Label</source> <translation>Etichetă</translation> </message> <message> <source>Message</source> <translation>Mesaj</translation> </message> <message> <source>Wallet</source> <translation>Portofel</translation> </message> <message> <source>Resulting URI too long, try to reduce the text for label / message.</source> <translation>URI rezultat este prea lung, încearcă să reduci textul pentru etichetă / mesaj.</translation> </message> <message> <source>Error encoding URI into QR Code.</source> <translation>Eroare la codarea URI-ului în cod QR.</translation> </message> </context> <context> <name>RecentRequestsTableModel</name> <message> <source>Date</source> <translation>Data</translation> </message> <message> <source>Label</source> <translation>Etichetă</translation> </message> <message> <source>Message</source> <translation>Mesaj</translation> </message> <message> <source>(no label)</source> <translation>(fără etichetă)</translation> </message> <message> <source>(no message)</source> <translation>(nici un mesaj)</translation> </message> <message> <source>(no amount requested)</source> <translation>(nici o sumă solicitată)</translation> </message> <message> <source>Requested</source> <translation>Cerută</translation> </message> </context> <context> <name>SendCoinsDialog</name>
<message> <source>Send Coins</source> <translation>Trimite monede</translation> </message> <message> <source>Coin Control Features</source> <translation>Caracteristici de control ale monedei</translation> </message> <message> <source>Inputs...</source> <translation>Intrări...</translation> </message> <message> <source>automatically selected</source> <translation>selecţie automată</translation> </message> <message> <source>Insufficient funds!</source> <translation>Fonduri insuficiente!</translation> </message> <message> <source>Quantity:</source> <translation>Cantitate:</translation> </message> <message> <source>Bytes:</source> <translation>Octeţi:</translation> </message> <message> <source>Amount:</source> <translation>Sumă:</translation> </message> <message> <source>Fee:</source> <translation>Comision:</translation> </message> <message> <source>After Fee:</source> <translation>După taxă:</translation> </message> <message> <source>Change:</source> <translation>Rest:</translation> </message> <message> <source>If this is activated, but the change address is empty or invalid, change will be sent to a newly generated address.</source> <translation>Dacă este activat, dar adresa de rest este goală sau nevalidă, restul va fi trimis la o adresă nou generată.</translation> </message> <message> <source>Custom change address</source> <translation>Adresă personalizată de rest</translation> </message> <message> <source>Transaction Fee:</source> <translation>Taxă tranzacţie:</translation> </message> <message> <source>Choose...</source> <translation>Alegeţi...</translation> </message> <message> <source>Using the fallbackfee can result in sending a transaction that will take several hours or days (or never) to confirm. Consider choosing your fee manually or wait until you have validated the complete chain.</source> <translation>Folosirea taxei implicite poate rezulta in trimiterea unei tranzactii care va dura cateva ore sau zile (sau niciodata) pentru a fi confirmata. Luati in considerare sa setati manual taxa sau asteptati pana ati validat complet lantul.</translation> </message> <message> <source>Warning: Fee estimation is currently not possible.</source> <translation>Avertisment: Estimarea comisionului nu este posibilă momentan.</translation> </message> <message> <source>collapse fee-settings</source> <translation>inchide setarile de taxare</translation> </message> <message> <source>Specify a custom fee per kB (1,000 bytes) of the transaction's virtual size. Note: Since the fee is calculated on a per-byte basis, a fee of "100 satoshis per kB" for a transaction size of 500 bytes (half of 1 kB) would ultimately yield a fee of only 50 satoshis.</source> <translation>Specificati o taxa anume pe kB (1000 byte) din marimea virtuala a tranzactiei. Nota: Cum taxa este calculata per byte, o taxa de "100 satoshi per kB" pentru o tranzactie de 500 byte (jumatate de kB) va produce o taxa de doar 50 satoshi.</translation> </message> <message> <source>per kilobyte</source> <translation>per kilooctet</translation> </message> <message> <source>Hide</source> <translation>Ascunde</translation> </message> <message> <source>Recommended:</source> <translation>Recomandat:</translation> </message> <message> <source>Custom:</source> <translation>Personalizat:</translation> </message> <message> <source>(Smart fee not initialized yet. This usually takes a few blocks...)</source> <translation>(Taxa smart nu este inca initializata.
Aceasta poate dura cateva blocuri...)</translation> </message> <message> <source>Send to multiple recipients at once</source> <translation>Trimite simultan către mai mulţi destinatari</translation> </message> <message> <source>Add &amp;Recipient</source> <translation>Adaugă destinata&amp;r</translation> </message> <message> <source>Clear all fields of the form.</source> <translation>Şterge toate câmpurile formularului.</translation> </message> <message> <source>Dust:</source> <translation>Praf:</translation> </message> <message> <source>Confirmation time target:</source> <translation>Timp confirmare tinta:</translation> </message> <message> <source>Enable Replace-By-Fee</source> <translation>Autorizeaza Replace-By-Fee</translation> </message> <message> <source>With Replace-By-Fee (BIP-125) you can increase a transaction's fee after it is sent. Without this, a higher fee may be recommended to compensate for increased transaction delay risk.</source> <translation>Cu Replace-By-Fee (BIP-125) se poate creste taxa unei tranzactii dupa ce a fost trimisa. Fara aceasta optiune, o taxa mai mare e posibil sa fie recomandata pentru a compensa riscul crescut de intarziere a tranzactiei.</translation> </message> <message> <source>Clear &amp;All</source> <translation>Curăţă to&amp;ate</translation> </message> <message> <source>Balance:</source> <translation>Balanţă:</translation> </message> <message> <source>Confirm the send action</source> <translation>Confirmă operaţiunea de trimitere</translation> </message> <message> <source>S&amp;end</source> <translation>Trimit&amp;e</translation> </message> <message> <source>Copy quantity</source> <translation>Copiază cantitatea</translation> </message> <message> <source>Copy amount</source> <translation>Copiază suma</translation> </message> <message> <source>Copy fee</source> <translation>Copiază taxa</translation> </message> <message> <source>Copy after fee</source> <translation>Copiază după taxă</translation> </message> <message> <source>Copy bytes</source> <translation>Copiază octeţi</translation> </message> <message> <source>Copy dust</source> <translation>Copiază praf</translation> </message> <message> <source>Copy change</source> <translation>Copiază rest</translation> </message> <message> <source>%1 (%2 blocks)</source> <translation>%1 (%2 blocuri)</translation> </message> <message> <source>%1 to %2</source> <translation>%1 la %2</translation> </message> <message> <source>Are you sure you want to send?</source> <translation>Sigur doriţi să trimiteţi?</translation> </message> <message> <source>or</source> <translation>sau</translation> </message> <message> <source>You can increase the fee later (signals Replace-By-Fee, BIP-125).</source> <translation>Puteti creste taxa mai tarziu (semnaleaza Replace-By-Fee, BIP-125).</translation> </message> <message> <source>from wallet %1</source> <translation>de la portofelul %1</translation> </message> <message> <source>Please, review your transaction.</source> <translation>Va rugam sa revizuiti tranzactia.</translation> </message> <message> <source>Transaction fee</source> <translation>Taxă tranzacţie</translation> </message> <message> <source>Not signalling Replace-By-Fee, BIP-125.</source> <translation>Nu se semnalizeaza Replace-By-Fee, BIP-125.</translation> </message> <message> <source>Total Amount</source> <translation>Suma totală</translation> </message> <message> <source>Confirm send coins</source> <translation>Confirmă trimiterea monedelor</translation> </message> <message> <source>The recipient address is not
valid. Please recheck.</source> <translation>Adresa destinatarului nu este validă. Vă rugăm să reverificaţi.</translation> </message> <message> <source>The amount to pay must be larger than 0.</source> <translation>Suma de plată trebuie să fie mai mare decît 0.</translation> </message> <message> <source>The amount exceeds your balance.</source> <translation>Suma depăşeşte soldul contului.</translation> </message> <message> <source>The total exceeds your balance when the %1 transaction fee is included.</source> <translation>Totalul depăşeşte soldul contului dacă se include şi plata taxei de %1.</translation> </message> <message> <source>Duplicate address found: addresses should only be used once each.</source> <translation>Adresă duplicat găsită: fiecare adresă ar trebui folosită o singură dată.</translation> </message> <message> <source>Transaction creation failed!</source> <translation>Creare tranzacţie nereuşită!</translation> </message> <message> <source>The transaction was rejected with the following reason: %1</source> <translation>Tranzactia a fost refuzata pentru urmatorul motiv: %1</translation> </message> <message> <source>A fee higher than %1 is considered an absurdly high fee.</source> <translation>O taxă mai mare de %1 este considerată o taxă absurd de mare.</translation> </message> <message> <source>Payment request expired.</source> <translation>Cerere de plată expirată.</translation> </message> <message numerus="yes"> <source>Estimated to begin confirmation within %n block(s).</source> <translation><numerusform>Se estimeaza inceperea confirmarii in %n bloc.</numerusform><numerusform>Se estimeaza inceperea confirmarii in %n blocuri.</numerusform><numerusform>Se estimeaza inceperea confirmarii in %n blocuri.</numerusform></translation> </message> <message> <source>Warning: Invalid Pottercoin address</source> <translation>Atenţie: Adresă Pottercoin nevalidă</translation> </message> <message> <source>Warning: Unknown change address</source> <translation>Atenţie: Adresă de rest necunoscută</translation> </message> <message> <source>Confirm custom change address</source> <translation>Confirmati adresa personalizata de rest</translation> </message> <message> <source>The address you selected for change is not part of this wallet. Any or all funds in your wallet may be sent to this address. Are you sure?</source> <translation>Adresa selectata pentru rest nu face parte din acest portofel. Orice suma, sau intreaga suma din portofel poate fi trimisa la aceasta adresa.
Sunteti sigur?</translation> </message> <message> <source>(no label)</source> <translation>(fără etichetă)</translation> </message> </context> <context> <name>SendCoinsEntry</name> <message> <source>A&amp;mount:</source> <translation>Su&amp;mă:</translation> </message> <message> <source>Pay &amp;To:</source> <translation>Plăteşte că&amp;tre:</translation> </message> <message> <source>&amp;Label:</source> <translation>&amp;Etichetă:</translation> </message> <message> <source>Choose previously used address</source> <translation>Alegeţi adrese folosite anterior</translation> </message> <message> <source>This is a normal payment.</source> <translation>Aceasta este o tranzacţie normală.</translation> </message> <message> <source>The Pottercoin address to send the payment to</source> <translation>Adresa Pottercoin către care se face plata</translation> </message> <message> <source>Alt+A</source> <translation>Alt+A</translation> </message> <message> <source>Paste address from clipboard</source> <translation>Lipeşte adresa din clipboard</translation> </message> <message> <source>Alt+P</source> <translation>Alt+P</translation> </message> <message> <source>Remove this entry</source> <translation>Înlătură această intrare</translation> </message> <message> <source>The fee will be deducted from the amount being sent. The recipient will receive less pottercoins than you enter in the amount field. If multiple recipients are selected, the fee is split equally.</source> <translation>Taxa va fi scazuta din suma trimisa. Destinatarul va primi mai putini pottercoin decat ati specificat in campul sumei trimise. Daca au fost selectati mai multi destinatari, taxa se va imparti in mod egal.</translation> </message> <message> <source>S&amp;ubtract fee from amount</source> <translation>S&amp;cade taxa din suma</translation> </message> <message> <source>Use available balance</source> <translation>Folosește balanța disponibilă</translation> </message> <message> <source>Message:</source> <translation>Mesaj:</translation> </message> <message> <source>This is an unauthenticated payment request.</source> <translation>Aceasta este o cerere de plata neautentificata.</translation> </message> <message> <source>This is an authenticated payment request.</source> <translation>Aceasta este o cerere de plata autentificata.</translation> </message> <message> <source>Enter a label for this address to add it to the list of used addresses</source> <translation>Introduceţi eticheta pentru ca această adresa să fie introdusă în lista de adrese folosite</translation> </message> <message> <source>A message that was attached to the pottercoin: URI which will be stored with the transaction for your reference. Note: This message will not be sent over the Pottercoin network.</source> <translation>Un mesaj a fost ataşat la pottercoin: URI care va fi stocat cu tranzacţia pentru referinţa dvs. Notă: Acest mesaj nu va fi trimis către reţeaua Pottercoin.</translation> </message> <message> <source>Pay To:</source> <translation>Plăteşte către:</translation> </message> <message> <source>Memo:</source> <translation>Memo:</translation> </message> <message> <source>Enter a label for this address to add it to your address book</source> <translation>Introduceţi o etichetă pentru această adresă pentru a fi adăugată în lista dvs.
de adrese</translation> </message> </context> <context> <name>SendConfirmationDialog</name> <message> <source>Yes</source> <translation>Da</translation> </message> </context> <context> <name>ShutdownWindow</name> <message> <source>%1 is shutting down...</source> <translation>%1 se închide...</translation> </message> <message> <source>Do not shut down the computer until this window disappears.</source> <translation>Nu închide calculatorul pînă ce această fereastră nu dispare.</translation> </message> </context> <context> <name>SignVerifyMessageDialog</name> <message> <source>Signatures - Sign / Verify a Message</source> <translation>Semnaturi - Semnează/verifică un mesaj</translation> </message> <message> <source>&amp;Sign Message</source> <translation>&amp;Semnează mesaj</translation> </message> <message> <source>You can sign messages/agreements with your addresses to prove you can receive pottercoins sent to them. Be careful not to sign anything vague or random, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source> <translation>Puteţi semna mesaje/contracte cu adresele dvs. pentru a demonstra ca puteti primi pottercoini trimisi la ele. Aveţi grijă să nu semnaţi nimic vag sau aleator, deoarece atacurile de tip phishing vă pot păcăli să le transferaţi identitatea. Semnaţi numai declaraţiile detaliate cu care sînteti de acord.</translation> </message> <message> <source>The Pottercoin address to sign the message with</source> <translation>Adresa cu care semnaţi mesajul</translation> </message> <message> <source>Choose previously used address</source> <translation>Alegeţi adrese folosite anterior</translation> </message> <message> <source>Alt+A</source> <translation>Alt+A</translation> </message> <message> <source>Paste address from clipboard</source> <translation>Lipeşte adresa copiată din clipboard</translation> </message> <message> <source>Alt+P</source> <translation>Alt+P</translation> </message> <message> <source>Enter the message you want to sign here</source> <translation>Introduceţi aici mesajul pe care vreţi să-l semnaţi</translation> </message> <message> <source>Signature</source> <translation>Semnătură</translation> </message> <message> <source>Copy the current signature to the system clipboard</source> <translation>Copiază semnatura curentă în clipboard-ul sistemului</translation> </message> <message> <source>Sign the message to prove you own this Pottercoin address</source> <translation>Semnează mesajul pentru a dovedi ca deţineţi această adresă Pottercoin</translation> </message> <message> <source>Sign &amp;Message</source> <translation>Semnează &amp;mesaj</translation> </message> <message> <source>Reset all sign message fields</source> <translation>Resetează toate cîmpurile mesajelor semnate</translation> </message> <message> <source>Clear &amp;All</source> <translation>Curăţă to&amp;ate</translation> </message> <message> <source>&amp;Verify Message</source> <translation>&amp;Verifică mesaj</translation> </message> <message> <source>Enter the receiver's address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.
Note that this only proves the signing party receives with the address, it cannot prove sendership of any transaction!</source> <translation>Introduceţi adresa destinatarului, mesajul (asiguraţi-vă că aţi copiat întocmai sfârşiturile de linie, spaţiile, taburile etc.) şi semnatura dedesubt pentru a verifica mesajul. Aveţi grijă să nu citiţi mai mult în semnatură decît mesajul în sine, pentru a evita să fiţi păcăliţi de un atac de tip man-in-the-middle. De notat ca aceasta dovedeste doar ca semnatarul primeste odata cu adresa, nu dovedeste insa trimiterea vreunei tranzactii.</translation> </message> <message> <source>The Pottercoin address the message was signed with</source> <translation>Adresa Pottercoin cu care a fost semnat mesajul</translation> </message> <message> <source>Verify the message to ensure it was signed with the specified Pottercoin address</source> <translation>Verificaţi mesajul pentru a vă asigura că a fost semnat cu adresa Pottercoin specificată</translation> </message> <message> <source>Verify &amp;Message</source> <translation>Verifică &amp;mesaj</translation> </message> <message> <source>Reset all verify message fields</source> <translation>Resetează toate cîmpurile de verificare a mesajului</translation> </message> <message> <source>Click "Sign Message" to generate signature</source> <translation>Faceţi clic pe "Semnează mesaj" pentru a genera semnătura</translation> </message> <message> <source>The entered address is invalid.</source> <translation>Adresa introdusă este invalidă.</translation> </message> <message> <source>Please check the address and try again.</source> <translation>Vă rugăm verificaţi adresa şi încercaţi din nou.</translation> </message> <message> <source>The entered address does not refer to a key.</source> <translation>Adresa introdusă nu se referă la o cheie.</translation> </message> <message> <source>Wallet unlock was cancelled.</source> <translation>Deblocarea portofelului a fost anulata.</translation> </message> <message> <source>Private key for the entered address is not available.</source> <translation>Cheia privată pentru adresa introdusă nu este disponibila.</translation> </message> <message> <source>Message signing failed.</source> <translation>Semnarea mesajului nu a reuşit.</translation> </message> <message> <source>Message signed.</source> <translation>Mesaj semnat.</translation> </message> <message> <source>The signature could not be decoded.</source> <translation>Semnatura nu a putut fi decodată.</translation> </message> <message> <source>Please check the signature and try again.</source> <translation>Vă rugăm verificaţi semnătura şi încercaţi din nou.</translation> </message> <message> <source>The signature did not match the message digest.</source> <translation>Semnatura nu se potriveşte cu rezumatul mesajului.</translation> </message> <message> <source>Message verification failed.</source> <translation>Verificarea mesajului nu a reuşit.</translation> </message> <message> <source>Message verified.</source> <translation>Mesaj verificat.
</translation> </message> </context> <context> <name>SplashScreen</name> <message> <source>[testnet]</source> <translation>[testnet]</translation> </message> </context> <context> <name>TrafficGraphWidget</name> <message> <source>KB/s</source> <translation>KB/s</translation> </message> </context> <context> <name>TransactionDesc</name> <message numerus="yes"> <source>Open for %n more block(s)</source> <translation><numerusform>Deschis pentru inca un bloc</numerusform><numerusform>Deschis pentru inca %n blocuri</numerusform><numerusform>Deschis pentru inca %n blocuri</numerusform></translation> </message> <message> <source>Open until %1</source> <translation>Deschis pînă la %1</translation> </message> <message> <source>conflicted with a transaction with %1 confirmations</source> <translation>in conflict cu o tranzactie cu %1 confirmari</translation> </message> <message> <source>0/unconfirmed, %1</source> <translation>0/neconfirmat, %1</translation> </message> <message> <source>in memory pool</source> <translation>in memory pool</translation> </message> <message> <source>not in memory pool</source> <translation>nu e in memory pool</translation> </message> <message> <source>abandoned</source> <translation>abandonat</translation> </message> <message> <source>%1/unconfirmed</source> <translation>%1/neconfirmat</translation> </message> <message> <source>%1 confirmations</source> <translation>%1 confirmări</translation> </message> <message> <source>Status</source> <translation>Stare</translation> </message> <message> <source>Date</source> <translation>Data</translation> </message> <message> <source>Source</source> <translation>Sursa</translation> </message> <message> <source>Generated</source> <translation>Generat</translation> </message> <message> <source>From</source> <translation>De la</translation> </message> <message> <source>unknown</source> <translation>necunoscut</translation> </message> <message> <source>To</source> <translation>Către</translation> </message> <message> <source>own address</source> <translation>adresa proprie</translation> </message> <message> <source>watch-only</source> <translation>doar-supraveghere</translation> </message> <message> <source>label</source> <translation>etichetă</translation> </message> <message> <source>Credit</source> <translation>Credit</translation> </message> <message numerus="yes"> <source>matures in %n more block(s)</source> <translation><numerusform>se matureaza intr-un bloc</numerusform><numerusform>se matureaza in %n blocuri</numerusform><numerusform>se matureaza in %n blocuri</numerusform></translation> </message> <message> <source>not accepted</source> <translation>neacceptat</translation> </message> <message> <source>Debit</source> <translation>Debit</translation> </message> <message> <source>Total debit</source> <translation>Total debit</translation> </message> <message> <source>Total credit</source> <translation>Total credit</translation> </message> <message> <source>Transaction fee</source> <translation>Taxă tranzacţie</translation> </message> <message> <source>Net amount</source> <translation>Suma netă</translation> </message> <message> <source>Message</source> <translation>Mesaj</translation> </message> <message> <source>Comment</source> <translation>Comentariu</translation> </message> <message> <source>Transaction ID</source> <translation>ID tranzacţie</translation> </message> <message> <source>Transaction total size</source> <translation>Dimensiune totala tranzacţie</translation> </message> <message> <source>Transaction virtual 
size</source> <translation>Dimensiune virtuala a tranzactiei</translation> </message> <message> <source>Output index</source> <translation>Index debit</translation> </message> <message> <source>Merchant</source> <translation>Comerciant</translation> </message> <message> <source>Generated coins must mature %1 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to "not accepted" and it won't be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source> <translation>Monedele generate se pot cheltui doar dupa inca %1 blocuri. După ce a fost generat, s-a propagat în reţea, urmând să fie adăugat in blockchain. Dacă nu poate fi inclus in lanţ, starea sa va deveni "neacceptat" si nu va putea fi folosit la tranzacţii. Acest fenomen se întâmplă atunci cand un alt nod a generat un bloc la o diferenţa de câteva secunde.</translation> </message> <message> <source>Debug information</source> <translation>Informaţii pentru depanare</translation> </message> <message> <source>Transaction</source> <translation>Tranzacţie</translation> </message> <message> <source>Inputs</source> <translation>Intrări</translation> </message> <message> <source>Amount</source> <translation>Cantitate</translation> </message> <message> <source>true</source> <translation>adevărat</translation> </message> <message> <source>false</source> <translation>fals</translation> </message> </context> <context> <name>TransactionDescDialog</name> <message> <source>This pane shows a detailed description of the transaction</source> <translation>Acest panou arată o descriere detaliată a tranzacţiei</translation> </message> <message> <source>Details for %1</source> <translation>Detalii pentru %1</translation> </message> </context> <context> <name>TransactionTableModel</name> <message> <source>Date</source> <translation>Data</translation> </message> <message> <source>Type</source> <translation>Tip</translation> </message> <message> <source>Label</source> <translation>Etichetă</translation> </message> <message> <source>Open until %1</source> <translation>Deschis pînă la %1</translation> </message> <message> <source>Unconfirmed</source> <translation>Neconfirmat</translation> </message> <message> <source>Abandoned</source> <translation>Abandonat</translation> </message> <message> <source>Confirming (%1 of %2 recommended confirmations)</source> <translation>Confirmare (%1 din %2 confirmari recomandate)</translation> </message> <message> <source>Confirmed (%1 confirmations)</source> <translation>Confirmat (%1 confirmari)</translation> </message> <message> <source>Conflicted</source> <translation>În conflict</translation> </message> <message> <source>Immature (%1 confirmations, will be available after %2)</source> <translation>Imatur (%1 confirmari, va fi disponibil după %2)</translation> </message> <message> <source>Generated but not accepted</source> <translation>Generat dar neacceptat</translation> </message> <message> <source>Received with</source> <translation>Recepţionat cu</translation> </message> <message> <source>Received from</source> <translation>Primit de la</translation> </message> <message> <source>Sent to</source> <translation>Trimis către</translation> </message> <message> <source>Payment to yourself</source> <translation>Plată către dvs.</translation> </message> <message> <source>Mined</source> <translation>Minat</translation> </message> <message> 
<source>watch-only</source> <translation>doar-supraveghere</translation> </message> <message> <source>(n/a)</source> <translation>(indisponibil)</translation> </message> <message> <source>(no label)</source> <translation>(fără etichetă)</translation> </message> <message> <source>Transaction status. Hover over this field to show number of confirmations.</source> <translation>Starea tranzacţiei. Treceţi cu mouse-ul peste acest cîmp pentru afişarea numărului de confirmari.</translation> </message> <message> <source>Date and time that the transaction was received.</source> <translation>Data şi ora la care a fost recepţionată tranzacţia.</translation> </message> <message> <source>Type of transaction.</source> <translation>Tipul tranzacţiei.</translation> </message> <message> <source>Whether or not a watch-only address is involved in this transaction.</source> <translation>Dacă o adresă doar-supraveghere este implicată sau nu în această tranzacţie.</translation> </message> <message> <source>User-defined intent/purpose of the transaction.</source> <translation>Intentie/scop al tranzactiei definit de utilizator.</translation> </message> <message> <source>Amount removed from or added to balance.</source> <translation>Suma extrasă sau adăugată la sold.</translation> </message> </context> <context> <name>TransactionView</name> <message> <source>All</source> <translation>Toate</translation> </message> <message> <source>Today</source> <translation>Astăzi</translation> </message> <message> <source>This week</source> <translation>Saptamana aceasta</translation> </message> <message> <source>This month</source> <translation>Luna aceasta</translation> </message> <message> <source>Last month</source> <translation>Luna trecuta</translation> </message> <message> <source>This year</source> <translation>Anul acesta</translation> </message> <message> <source>Range...</source> <translation>Interval...</translation> </message> <message> <source>Received with</source> <translation>Recepţionat cu</translation> </message> <message> <source>Sent to</source> <translation>Trimis către</translation> </message> <message> <source>To yourself</source> <translation>Către dvs.</translation> </message> <message> <source>Mined</source> <translation>Minat</translation> </message> <message> <source>Other</source> <translation>Altele</translation> </message> <message> <source>Enter address, transaction id, or label to search</source> <translation>Introduceți adresa, ID-ul tranzacției, sau eticheta pentru a căuta</translation> </message> <message> <source>Min amount</source> <translation>Suma minimă</translation> </message> <message> <source>Abandon transaction</source> <translation>Abandoneaza tranzacţia</translation> </message> <message> <source>Increase transaction fee</source> <translation>Cresteti comisionul pentru tranzacţie</translation> </message> <message> <source>Copy address</source> <translation>Copiază adresa</translation> </message> <message> <source>Copy label</source> <translation>Copiază eticheta</translation> </message> <message> <source>Copy amount</source> <translation>Copiază suma</translation> </message> <message> <source>Copy transaction ID</source> <translation>Copiază ID tranzacţie</translation> </message> <message> <source>Copy raw transaction</source> <translation>Copiază tranzacţia bruta</translation> </message> <message> <source>Copy full transaction details</source> <translation>Copiaza toate detaliile tranzacţiei</translation> </message> <message> <source>Edit label</source> <translation>Editează
eticheta</translation> </message> <message> <source>Show transaction details</source> <translation>Arată detaliile tranzacţiei</translation> </message> <message> <source>Export Transaction History</source> <translation>Export istoric tranzacţii</translation> </message> <message> <source>Comma separated file (*.csv)</source> <translation>Fişier cu valori separate prin virgulă (*.csv)</translation> </message> <message> <source>Confirmed</source> <translation>Confirmat</translation> </message> <message> <source>Watch-only</source> <translation>Doar-supraveghere</translation> </message> <message> <source>Date</source> <translation>Data</translation> </message> <message> <source>Type</source> <translation>Tip</translation> </message> <message> <source>Label</source> <translation>Etichetă</translation> </message> <message> <source>Address</source> <translation>Adresă</translation> </message> <message> <source>ID</source> <translation>ID</translation> </message> <message> <source>Exporting Failed</source> <translation>Exportarea a eșuat</translation> </message> <message> <source>There was an error trying to save the transaction history to %1.</source> <translation>S-a produs o eroare la salvarea istoricului tranzacţiilor la %1.</translation> </message> <message> <source>Exporting Successful</source> <translation>Export reuşit</translation> </message> <message> <source>The transaction history was successfully saved to %1.</source> <translation>Istoricul tranzacţiilor a fost salvat cu succes la %1.</translation> </message> <message> <source>Range:</source> <translation>Interval:</translation> </message> <message> <source>to</source> <translation>către</translation> </message> </context> <context> <name>UnitDisplayStatusBarControl</name> <message> <source>Unit to show amounts in. Click to select another unit.</source> <translation>Unitatea în care sînt arătate sumele.
Faceţi clic pentru a selecta o altă unitate.</translation> </message> </context> <context> <name>WalletController</name> </context> <context> <name>WalletFrame</name> <message> <source>No wallet has been loaded.</source> <translation>Nu a fost încărcat nici un portofel.</translation> </message> </context> <context> <name>WalletModel</name> <message> <source>Send Coins</source> <translation>Trimite monede</translation> </message> <message> <source>Fee bump error</source> <translation>Eroare in cresterea taxei</translation> </message> <message> <source>Increasing transaction fee failed</source> <translation>Cresterea comisionului pentru tranzactie a esuat.</translation> </message> <message> <source>Do you want to increase the fee?</source> <translation>Doriti sa cresteti taxa de tranzactie?</translation> </message> <message> <source>Current fee:</source> <translation>Comision curent:</translation> </message> <message> <source>Increase:</source> <translation>Crestere:</translation> </message> <message> <source>New fee:</source> <translation>Noul comision:</translation> </message> <message> <source>Confirm fee bump</source> <translation>Confirma cresterea comisionului</translation> </message> <message> <source>Can't sign transaction.</source> <translation>Nu s-a reuşit semnarea tranzacţiei</translation> </message> <message> <source>Could not commit transaction</source> <translation>Tranzactia nu a putut fi consemnata.</translation> </message> <message> <source>default wallet</source> <translation>portofel implicit</translation> </message> </context> <context> <name>WalletView</name> <message> <source>&amp;Export</source> <translation>&amp;Export</translation> </message> <message> <source>Export the data in the current tab to a file</source> <translation>Exportă datele din tab-ul curent într-un fişier</translation> </message> <message> <source>Backup Wallet</source> <translation>Backup portofelul electronic</translation> </message> <message> <source>Wallet Data (*.dat)</source> <translation>Date portofel (*.dat)</translation> </message> <message> <source>Backup Failed</source> <translation>Backup esuat</translation> </message> <message> <source>There was an error trying to save the wallet data to %1.</source> <translation>S-a produs o eroare la salvarea datelor portofelului la %1.</translation> </message> <message> <source>Backup Successful</source> <translation>Backup efectuat cu succes</translation> </message> <message> <source>The wallet data was successfully saved to %1.</source> <translation>Datele portofelului s-au salvat cu succes la %1.</translation> </message> <message> <source>Cancel</source> <translation>Anulare</translation> </message> </context> <context> <name>bitcoin-core</name> <message> <source>Distributed under the MIT software license, see the accompanying file %s or %s</source> <translation>Distribuit sub licenţa de programe MIT, vezi fişierul însoţitor %s sau %s</translation> </message> <message> <source>Prune configured below the minimum of %d MiB. Please use a higher number.</source> <translation>Reductia e configurata sub minimul de %d MiB. Rugam folositi un numar mai mare.</translation> </message> <message> <source>Prune: last wallet synchronisation goes beyond pruned data. You need to -reindex (download the whole blockchain again in case of pruned node)</source> <translation>Reductie: ultima sincronizare merge dincolo de datele reductiei. 
Trebuie sa faceti -reindex (sa descarcati din nou intregul blockchain in cazul unui nod redus)</translation> </message> <message> <source>Rescans are not possible in pruned mode. You will need to use -reindex which will download the whole blockchain again.</source> <translation>Rescanarile nu sunt posibile in modul redus. Va trebui sa folositi -reindex, ceea ce va descarca din nou intregul blockchain.</translation> </message> <message> <source>Error: A fatal internal error occurred, see debug.log for details</source> <translation>Eroare: S-a produs o eroare interna fatala, vedeti debug.log pentru detalii</translation> </message> <message> <source>Pruning blockstore...</source> <translation>Reductie blockstore...</translation> </message> <message> <source>Unable to start HTTP server. See debug log for details.</source> <translation>Imposibil de pornit serverul HTTP. Pentru detalii vezi logul de depanare.</translation> </message> <message> <source>Pottercoin Core</source> <translation>Nucleul Pottercoin</translation> </message> <message> <source>The %s developers</source> <translation>Dezvoltatorii %s</translation> </message> <message> <source>Cannot obtain a lock on data directory %s. %s is probably already running.</source> <translation>Nu se poate obține o blocare a directorului de date %s. %s probabil rulează deja.</translation> </message> <message> <source>Cannot provide specific connections and have addrman find outgoing connections at the same.</source> <translation>Nu se pot furniza conexiuni specifice in acelasi timp in care addrman este folosit pentru a gasi conexiuni de iesire.</translation> </message> <message> <source>Error reading %s! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source> <translation>Eroare la citirea %s! Toate cheile sînt citite corect, dar datele tranzactiei sau anumite intrări din agenda sînt incorecte sau lipsesc.</translation> </message> <message> <source>Please check that your computer's date and time are correct! If your clock is wrong, %s will not work properly.</source> <translation>Vă rugăm verificaţi dacă data/timpul calculatorului dvs. sînt corecte! Dacă ceasul calcultorului este gresit, %s nu va funcţiona corect.</translation> </message> <message> <source>Please contribute if you find %s useful. Visit %s for further information about the software.</source> <translation>Va rugam sa contribuiti daca apreciati ca %s va este util. Vizitati %s pentru mai multe informatii despre software.</translation> </message> <message> <source>The block database contains a block which appears to be from the future. This may be due to your computer's date and time being set incorrectly. Only rebuild the block database if you are sure that your computer's date and time are correct</source> <translation>Baza de date a blocurilor contine un bloc ce pare a fi din viitor. Acest lucru poate fi cauzat de setarea incorecta a datei si orei in computerul dvs. 
Reconstruiti baza de date a blocurilor doar daca sunteti sigur ca data si ora calculatorului dvs sunt corecte.</translation> </message> <message> <source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source> <translation>Aceasta este o versiune de test preliminară - vă asumaţi riscul folosind-o - nu folosiţi pentru minerit sau aplicaţiile comercianţilor</translation> </message> <message> <source>This is the transaction fee you may discard if change is smaller than dust at this level</source> <translation>Aceasta este taxa de tranzactie la care puteti renunta daca restul este mai mic decat praful la acest nivel.</translation> </message> <message> <source>Unable to replay blocks. You will need to rebuild the database using -reindex-chainstate.</source> <translation>Imposibil de refacut blocurile. Va trebui sa reconstruiti baza de date folosind -reindex-chainstate.</translation> </message> <message> <source>Unable to rewind the database to a pre-fork state. You will need to redownload the blockchain</source> <translation>Imposibil de a readuce baza de date la statusul pre-fork. Va trebui redescarcat blockchainul.</translation> </message> <message> <source>Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues.</source> <translation>Atenţie: Reţeaua nu pare să fie de acord în totalitate! Aparent nişte mineri au probleme.</translation> </message> <message> <source>Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade.</source> <translation>Atenţie: Aparent, nu sîntem de acord cu toţi partenerii noştri! Va trebui să faceţi o actualizare, sau alte noduri necesită actualizare.</translation> </message> <message> <source>%d of last 100 blocks have unexpected version</source> <translation>%d din ultimele 100 blocuri au o versiune neasteptata</translation> </message> <message> <source>%s corrupt, salvage failed</source> <translation>%s corupt, salvare nereuşită</translation> </message> <message> <source>-maxmempool must be at least %d MB</source> <translation>-maxmempool trebuie sa fie macar %d MB</translation> </message> <message> <source>Cannot resolve -%s address: '%s'</source> <translation>Nu se poate rezolva adresa -%s: '%s'</translation> </message> <message> <source>Change index out of range</source> <translation>Indexul de rest este in afara intervalului</translation> </message> <message> <source>Copyright (C) %i-%i</source> <translation>Copyright (C) %i-%i</translation> </message> <message> <source>Corrupted block database detected</source> <translation>Baza de date de blocuri coruptă detectată</translation> </message> <message> <source>Do you want to rebuild the block database now?</source> <translation>Doriţi să reconstruiţi baza de date de blocuri acum?</translation> </message> <message> <source>Error initializing block database</source> <translation>Eroare la iniţializarea bazei de date de blocuri</translation> </message> <message> <source>Error initializing wallet database environment %s!</source> <translation>Eroare la iniţializarea mediului de bază de date a portofelului %s!</translation> </message> <message> <source>Error loading %s</source> <translation>Eroare la încărcarea %s</translation> </message> <message> <source>Error loading %s: Private keys can only be disabled during creation</source> <translation>Eroare la incarcarea %s: Cheile private pot fi dezactivate doar in momentul crearii</translation> </message> <message>
<source>Error loading %s: Wallet corrupted</source> <translation>Eroare la încărcarea %s: Portofel corupt</translation> </message> <message> <source>Error loading %s: Wallet requires newer version of %s</source> <translation>Eroare la încărcarea %s: Portofelul are nevoie de o versiune %s mai nouă</translation> </message> <message> <source>Error loading block database</source> <translation>Eroare la încărcarea bazei de date de blocuri</translation> </message> <message> <source>Error opening block database</source> <translation>Eroare la deschiderea bazei de date de blocuri</translation> </message> <message> <source>Error: Disk space is low!</source> <translation>Eroare: Spaţiu pe disc redus!</translation> </message> <message> <source>Failed to listen on any port. Use -listen=0 if you want this.</source> <translation>Nu s-a reuşit ascultarea pe niciun port. Folosiţi -listen=0 dacă vreţi asta.</translation> </message> <message> <source>Failed to rescan the wallet during initialization</source> <translation>Rescanarea portofelului in timpul initializarii a esuat.</translation> </message> <message> <source>Importing...</source> <translation>Import...</translation> </message> <message> <source>Incorrect or no genesis block found. Wrong datadir for network?</source> <translation>Bloc de geneza incorect sau negăsit. Director de date greşit pentru retea?</translation> </message> <message> <source>Initialization sanity check failed. %s is shutting down.</source> <translation>Nu s-a reuşit iniţierea verificării sănătăţii. %s se inchide.</translation> </message> <message> <source>Invalid amount for -%s=&lt;amount&gt;: '%s'</source> <translation>Sumă nevalidă pentru -%s=&lt;amount&gt;: '%s'</translation> </message> <message> <source>Invalid amount for -discardfee=&lt;amount&gt;: '%s'</source> <translation>Sumă nevalidă pentru -discardfee=&lt;amount&gt;: '%s'</translation> </message> <message> <source>Invalid amount for -fallbackfee=&lt;amount&gt;: '%s'</source> <translation>Suma nevalidă pentru -fallbackfee=&lt;amount&gt;: '%s'</translation> </message> <message> <source>Specified blocks directory "%s" does not exist.</source> <translation>Directorul de blocuri "%s" specificat nu exista.</translation> </message> <message> <source>Upgrading txindex database</source> <translation>Actualizarea bazei de date txindex</translation> </message> <message> <source>Loading P2P addresses...</source> <translation>Încărcare adrese P2P...</translation> </message> <message> <source>Loading banlist...</source> <translation>Încărcare banlist...</translation> </message> <message> <source>Not enough file descriptors available.</source> <translation>Nu sînt destule descriptoare disponibile.</translation> </message> <message> <source>Prune cannot be configured with a negative value.</source> <translation>Reductia nu poate fi configurata cu o valoare negativa.</translation> </message> <message> <source>Prune mode is incompatible with -txindex.</source> <translation>Modul redus este incompatibil cu -txindex.</translation> </message> <message> <source>Replaying blocks...</source> <translation>Se reiau blocurile...</translation> </message> <message> <source>Rewinding blocks...</source> <translation>Se deruleaza blocurile...</translation> </message> <message> <source>The source code is available from %s.</source> <translation>Codul sursa este disponibil la %s.</translation> </message> <message> <source>Transaction fee and change calculation failed</source> <translation>Calculul taxei de tranzactie si a restului a esuat.</translation>
</message> <message> <source>Unable to bind to %s on this computer. %s is probably already running.</source> <translation>Nu se poate efectua legatura la %s pe acest computer. %s probabil ruleaza deja.</translation> </message> <message> <source>Unable to generate keys</source> <translation>Nu s-au putut genera cheile</translation> </message> <message> <source>Unsupported logging category %s=%s.</source> <translation>Categoria de logging %s=%s nu este suportata.</translation> </message> <message> <source>Upgrading UTXO database</source> <translation>Actualizarea bazei de date UTXO</translation> </message> <message> <source>User Agent comment (%s) contains unsafe characters.</source> <translation>Comentariul (%s) al Agentului Utilizator contine caractere nesigure.</translation> </message> <message> <source>Verifying blocks...</source> <translation>Se verifică blocurile...</translation> </message> <message> <source>Error: Listening for incoming connections failed (listen returned error %s)</source> <translation>Eroare: Ascultarea conexiunilor de intrare nu a reuşit (ascultarea a returnat eroarea %s)</translation> </message> <message> <source>Invalid amount for -maxtxfee=&lt;amount&gt;: '%s' (must be at least the minrelay fee of %s to prevent stuck transactions)</source> <translation>Sumă nevalidă pentru -maxtxfee=&lt;amount&gt;: '%s' (trebuie să fie cel puţin taxa minrelay de %s pentru a preveni blocarea tranzactiilor)</translation> </message> <message> <source>The transaction amount is too small to send after the fee has been deducted</source> <translation>Suma tranzactiei este prea mica pentru a fi trimisa dupa ce se scade taxa.</translation> </message> <message> <source>You need to rebuild the database using -reindex to go back to unpruned mode. This will redownload the entire blockchain</source> <translation>Trebuie reconstruita intreaga baza de date folosind -reindex pentru a va intoarce la modul non-redus. Aceasta va determina descarcarea din nou a intregului blockchain</translation> </message> <message> <source>Error reading from database, shutting down.</source> <translation>Eroare la citirea bazei de date.
Oprire.</translation> </message> <message> <source>Error upgrading chainstate database</source> <translation>Eroare la actualizarea bazei de date chainstate</translation> </message> <message> <source>Information</source> <translation>Informaţie</translation> </message> <message> <source>Invalid -onion address or hostname: '%s'</source> <translation>Adresa sau hostname -onion invalide: '%s'</translation> </message> <message> <source>Invalid -proxy address or hostname: '%s'</source> <translation>Adresa sau hostname -proxy invalide: '%s'</translation> </message> <message> <source>Invalid amount for -paytxfee=&lt;amount&gt;: '%s' (must be at least %s)</source> <translation>Sumă nevalidă pentru -paytxfee=&lt;amount&gt;: '%s' (trebuie să fie cel puţin %s)</translation> </message> <message> <source>Invalid netmask specified in -whitelist: '%s'</source> <translation>Mască reţea nevalidă specificată în -whitelist: '%s'</translation> </message> <message> <source>Need to specify a port with -whitebind: '%s'</source> <translation>Trebuie să specificaţi un port cu -whitebind: '%s'</translation> </message> <message> <source>Reducing -maxconnections from %d to %d, because of system limitations.</source> <translation>Se micsoreaza -maxconnections de la %d la %d, datorita limitarilor de sistem.</translation> </message> <message> <source>Signing transaction failed</source> <translation>Nu s-a reuşit semnarea tranzacţiei</translation> </message> <message> <source>Specified -walletdir "%s" does not exist</source> <translation>Nu exista -walletdir "%s" specificat</translation> </message> <message> <source>Specified -walletdir "%s" is a relative path</source> <translation>-walletdir "%s" specificat este o cale relativa</translation> </message> <message> <source>The transaction amount is too small to pay the fee</source> <translation>Suma tranzactiei este prea mica pentru plata taxei</translation> </message> <message> <source>This is experimental software.</source> <translation>Acesta este un program experimental.</translation> </message> <message> <source>Transaction amount too small</source> <translation>Suma tranzacţionată este prea mică</translation> </message> <message> <source>Transaction too large for fee policy</source> <translation>Tranzacţie prea mare pentru politica de comisioane</translation> </message> <message> <source>Transaction too large</source> <translation>Tranzacţie prea mare</translation> </message> <message> <source>Unable to bind to %s on this computer (bind returned error %s)</source> <translation>Nu se poate lega la %s pe acest calculator. (Legarea a întors eroarea %s)</translation> </message> <message> <source>Unable to generate initial keys</source> <translation>Nu s-au putut genera cheile initiale</translation> </message> <message> <source>Verifying wallet(s)...</source> <translation>Se verifică portofelul(ele)...</translation> </message> <message> <source>Wallet %s resides outside wallet directory %s</source> <translation>Portofelul %s se află în afara directorului portofelului %s</translation> </message> <message> <source>Warning</source> <translation>Avertisment</translation> </message> <message> <source>Warning: unknown new rules activated (versionbit %i)</source> <translation>Atentie: se activeaza reguli noi necunoscute (versionbit %i)</translation> </message> <message> <source>Zapping all transactions from wallet...</source> <translation>Şterge toate tranzacţiile din portofel...</translation> </message> <message> <source>-maxtxfee is set very high!
Fees this large could be paid on a single transaction.</source> <translation>-maxtxfee este setata foarte sus! Se pot plati taxe de aceasta marime pe o singura tranzactie.</translation> </message> <message> <source>This is the transaction fee you may pay when fee estimates are not available.</source> <translation>Aceasta este taxa de tranzactie pe care este posibil sa o platiti daca estimarile de taxe nu sunt disponibile.</translation> </message> <message> <source>This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit %s and cryptographic software written by Eric Young and UPnP software written by Thomas Bernard.</source> <translation>Acest produs include software dezvoltat de OpenSSL Project pentru a fi folosit in Toolkitul OpenSSL %s, software criptografic scris de Eric Young si software UPnP scris de Thomas Bernard.</translation> </message> <message> <source>Total length of network version string (%i) exceeds maximum length (%i). Reduce the number or size of uacomments.</source> <translation>Lungimea totala a sirului versiunii retelei (%i) depaseste lungimea maxima (%i). Reduceti numarul sau dimensiunea uacomments.</translation> </message> <message> <source>Warning: Wallet file corrupt, data salvaged! Original %s saved as %s in %s; if your balance or transactions are incorrect you should restore from a backup.</source> <translation>Atenţie: fişierul portofelului este corupt, date salvate! Fişierul %s a fost salvat ca %s in %s; dacă balanta sau tranzactiile sunt incorecte ar trebui să restauraţi dintr-o copie de siguranţă.</translation> </message> <message> <source>%s is set very high!</source> <translation>%s este setata foarte sus!</translation> </message> <message> <source>Error loading wallet %s. Duplicate -wallet filename specified.</source> <translation>Eroare la incarcarea portofelului %s.
Este specificat un fisier -wallet duplicat.</translation> </message> <message> <source>Keypool ran out, please call keypoolrefill first</source> <translation>Keypool epuizat, folositi intai functia keypoolrefill</translation> </message> <message> <source>Starting network threads...</source> <translation>Se pornesc threadurile retelei...</translation> </message> <message> <source>The wallet will avoid paying less than the minimum relay fee.</source> <translation>Portofelul va evita sa plateasca mai putin decat minimul taxei de retransmisie.</translation> </message> <message> <source>This is the minimum transaction fee you pay on every transaction.</source> <translation>Acesta este minimul de taxa de tranzactie care va fi platit la fiecare tranzactie.</translation> </message> <message> <source>This is the transaction fee you will pay if you send a transaction.</source> <translation>Aceasta este taxa de tranzactie pe care o platiti cand trimiteti o tranzactie.</translation> </message> <message> <source>Transaction amounts must not be negative</source> <translation>Sumele tranzactionate nu pot fi negative</translation> </message> <message> <source>Transaction has too long of a mempool chain</source> <translation>Tranzacţia are o lungime prea mare in lantul mempool</translation> </message> <message> <source>Transaction must have at least one recipient</source> <translation>Tranzactia trebuie sa aiba cel putin un destinatar</translation> </message> <message> <source>Unknown network specified in -onlynet: '%s'</source> <translation>Reţeaua specificată în -onlynet este necunoscută: '%s'</translation> </message> <message> <source>Insufficient funds</source> <translation>Fonduri insuficiente</translation> </message> <message> <source>Fee estimation failed. Fallbackfee is disabled. Wait a few blocks or enable -fallbackfee.</source> <translation>Estimarea taxei a esuat. Taxa implicita este dezactivata. Asteptati cateva blocuri, sau activati -fallbackfee.</translation> </message> <message> <source>Warning: Private keys detected in wallet {%s} with disabled private keys</source> <translation>Atentie: S-au detectat chei private in portofelul {%s} cu cheile private dezactivate</translation> </message> <message> <source>Cannot write to data directory '%s'; check permissions.</source> <translation>Nu se poate scrie in directorul de date '%s'; verificati permisiunile.</translation> </message> <message> <source>Loading block index...</source> <translation>Încărcare index bloc...</translation> </message> <message> <source>Loading wallet...</source> <translation>Încărcare portofel...</translation> </message> <message> <source>Cannot downgrade wallet</source> <translation>Nu se poate retrograda portofelul</translation> </message> <message> <source>Rescanning...</source> <translation>Rescanare...</translation> </message> <message> <source>Done loading</source> <translation>Încărcare terminată</translation> </message> <message> <source>Error</source> <translation>Eroare</translation> </message> </context> </TS>
Test.test.ts
import * as assert from "assert";
import { Context } from "egg";
import { app } from "egg-mock/bootstrap";

describe("test/app/service/Test.test.js", () => {
  let ctx: Context;

  before(async () => {
    ctx = app.mockContext();
  });

  it("sayHi", async () => {
    const result = await ctx.service.test.sayHi("egg");
    assert(result === "hi, egg");
  });
});
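// Editorial note: the test above assumes a service class reachable as
// ctx.service.test (conventionally app/service/Test.ts) that implements
// sayHi. A minimal sketch compatible with this test is shown below as a
// comment; only the method name and expected return value are implied by
// the test itself, everything else is an assumption.
//
// import { Service } from "egg";
//
// export default class Test extends Service {
//   public async sayHi(name: string): Promise<string> {
//     return `hi, ${name}`;
//   }
// }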
scar.rs
// Copyright 2020 Cargill Incorporated // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use std::env::{split_paths, var_os}; use std::fs::File; use std::io::Read; use std::path::{Path, PathBuf}; use bzip2::read::BzDecoder; use tar::Archive; use super::Error; const SCAR_FILE_EXTENSION: &str = "scar"; const SCAR_PATH_ENV_VAR: &str = "SCAR_PATH"; const MANIFEST_FILENAME: &str = "manifest.yaml"; const WASM_FILE_EXTENSION: &str = "wasm"; /// The definition of a Sabre smart contract, including the bytes of the smart contract itself and /// the associated metadata that is required for submitting the smart contract to scabbard. #[derive(Debug)] pub struct SabreSmartContractDefinition { pub contract: Vec<u8>, pub metadata: SabreSmartContractMetadata, } impl SabreSmartContractDefinition { /// Load a `SabreSmartContractDefinition` from a .scar file on the local filesystem. /// /// If the argument is a file path (contains a '/'), this will attempt to load the .scar from /// the specified location. If the argument is not a file path, this will attempt to load the /// .scar from the directories listed in the SCAR_PATH environment variable. When loading from /// a directory in SCAR_PATH, the '.scar' file extension is optional. pub fn new_from_scar(scar: &str) -> Result<SabreSmartContractDefinition, Error> { let scar_file_path = determine_scar_file_path(scar)?; load_smart_contract_from_file(&scar_file_path) } } /// The metadata of a smart contract that needs to be included in the Sabre transaction. #[derive(Debug, Deserialize, Serialize)] pub struct SabreSmartContractMetadata { pub name: String, pub version: String, pub inputs: Vec<String>, pub outputs: Vec<String>, } fn determine_scar_file_path(scar: &str) -> Result<PathBuf, Error> { if scar.contains('/') { Ok(PathBuf::from(scar)) } else { let scar_paths = var_os(SCAR_PATH_ENV_VAR).ok_or_else(|| { Error::new(&format!( "cannot find scar file: {} not set", SCAR_PATH_ENV_VAR )) })?; split_paths(&scar_paths) .find_map(|mut path| { path.push(scar); if path.exists() { Some(path) } else { path.set_extension(SCAR_FILE_EXTENSION); if path.exists() { Some(path) } else { None } } }) .ok_or_else(|| Error::new(&format!("{} not found in {}", scar, SCAR_PATH_ENV_VAR))) } } fn load_smart_contract_from_file(file_path: &Path) -> Result<SabreSmartContractDefinition, Error> { let scar_file = File::open(file_path).map_err(|err| { Error::new_with_source( &format!("failed to open file {}", file_path.display()), err.into(), ) })?; let mut archive = Archive::new(BzDecoder::new(scar_file)); let archive_entries = archive .entries() .map_err(|err| Error::new_with_source("failed to read scar file", err.into()))?; let mut metadata = None; let mut contract = None; for entry in archive_entries { let mut entry = entry.map_err(|err| { Error::new_with_source( "invalid scar file: failed to read archive entry", err.into(), ) })?; let path = entry .path() .map_err(|err| { Error::new_with_source( "invalid scar file: failed to get path of archive entry", err.into(), ) })? 
.into_owned(); if path_is_manifest(&path) { metadata = Some(serde_yaml::from_reader(entry).map_err(|err| { Error::new_with_source("invalid scar file: manifest.yaml invalid", err.into()) })?); } else if path_is_wasm(&path) { let mut contract_bytes = vec![]; entry.read_to_end(&mut contract_bytes).map_err(|err| { Error::new_with_source( "invalid scar file: failed to read smart contract", err.into(), ) })?; contract = Some(contract_bytes); } } Ok(SabreSmartContractDefinition { metadata: metadata .ok_or_else(|| Error::new("invalid scar file: manifest.yaml not found"))?, contract: contract .ok_or_else(|| Error::new("invalid scar file: smart contract not found"))?, }) } fn
path_is_manifest(path: &std::path::Path) -> bool { path.file_name() .map(|file_name| file_name == MANIFEST_FILENAME) .unwrap_or(false) } fn path_is_wasm(path: &std::path::Path) -> bool { match path.extension() { Some(extension) => extension == WASM_FILE_EXTENSION, None => false, } } #[cfg(test)] mod tests { use super::*; use std::io::Write; use std::path::Path; use bzip2::write::BzEncoder; use bzip2::Compression; use serde::Serialize; use serial_test::serial; use tar::Builder; use tempdir::TempDir; const MOCK_CONTRACT_BYTES: &[u8] = &[0x00, 0x01, 0x02, 0x03]; const MOCK_CONTRACT_FILENAME: &str = "mock.wasm"; const MOCK_SCAR_FILENAME: &str = "mock.scar"; // The tests in this module must run serially because some tests modify environment variable(s) // that are used by all tests. Each test is annotated with `#[serial(scar_path)]` to enforce // this. /// Verify that a .scar file can be loaded by providing the name + extension of a .scar file /// that is located in one of the paths specified by the SCAR_PATH environment variable. /// Example: `mock.scar` -> `/path/to/mock.scar`, SCAR_PATH contains `/path/to` #[test] #[serial(scar_path)] fn load_smart_contract_from_path_with_file_extension_successful() { let setup = UploadTestSetup::new().build(); SabreSmartContractDefinition::new_from_scar(&setup.scar) .expect("failed to perform upload action"); } /// Verify that a .scar file can be loaded by providing the name of a .scar file, without a /// file extension, that is located in one of the paths specified by the SCAR_PATH environment /// variable. /// Example: `mock` -> `/path/to/mock.scar`, SCAR_PATH contains `/path/to` #[test] #[serial(scar_path)] fn load_smart_contract_from_path_without_file_extension_successful() { let setup = UploadTestSetup::new().with_scar_without_extension().build(); SabreSmartContractDefinition::new_from_scar(&setup.scar) .expect("failed to perform upload action"); } /// Verify that a .scar file can be loaded by providing a full path to the .scar file. /// Example: `/path/to/mock.scar` #[test] #[serial(scar_path)] fn load_smart_contract_from_file_successful() { let setup = UploadTestSetup::new().with_scar_from_file().build(); SabreSmartContractDefinition::new_from_scar(&setup.scar) .expect("failed to perform upload action"); } /// Verify that an error is returned when attempting to load a non-existent .scar file. #[test] #[serial(scar_path)] fn load_smart_contract_file_not_found() { let setup = UploadTestSetup::new() .with_scar("/non_existent_dir/mock.scar".into()) .build(); assert!(SabreSmartContractDefinition::new_from_scar(&setup.scar).is_err()); } /// Verify that an error is returned when attempting to load a .scar file from SCAR_PATH, but /// SCAR_PATH is not set. #[test] #[serial(scar_path)] fn load_smart_contract_path_not_set() { let setup = UploadTestSetup::new().with_scar_path_env_var(None).build(); assert!(SabreSmartContractDefinition::new_from_scar(&setup.scar).is_err()); } /// Verify that an error is returned when attempting to load a .scar file from SCAR_PATH, but /// the specified .scar file cannot be found in SCAR_PATH. #[test] #[serial(scar_path)] fn load_smart_contract_not_found_in_path() { let setup = UploadTestSetup::new() .with_scar_path_env_var(Some("".into())) .build(); assert!(SabreSmartContractDefinition::new_from_scar(&setup.scar).is_err()); } /// Verify that an error is returned when attempting to load a .scar file that does not contain /// a `manifest.yaml` file.
#[test] #[serial(scar_path)] fn load_smart_contract_manifest_not_found() { let setup = UploadTestSetup::new() .with_manifest::<SabreSmartContractMetadata>(None) .build(); assert!(SabreSmartContractDefinition::new_from_scar(&setup.scar).is_err()); } /// Verify that an error is returned when attempting to load a .scar file whose `manifest.yaml` /// is invalidly formatted. #[test] #[serial(scar_path)] fn load_smart_contract_manifest_invalid() { let setup = UploadTestSetup::new().with_manifest(Some("")).build(); assert!(SabreSmartContractDefinition::new_from_scar(&setup.scar).is_err()); } /// Verify that an error is returned when attempting to load a .scar file that does not contain /// a .wasm smart contract. #[test] #[serial(scar_path)] fn load_smart_contract_contract_not_found() { let setup = UploadTestSetup::new().set_contract(false).build(); assert!(SabreSmartContractDefinition::new_from_scar(&setup.scar).is_err()); } /// Builder for setting up the test environment. By default, the builder will create a valid /// environment for loading a .scar file from SCAR_PATH with the filename + extension of the /// .scar file. struct UploadTestSetup { temp_dir: TempDir, set_contract: bool, manifest: Option<Vec<u8>>, scar_path_env_var: Option<String>, scar: String, } impl UploadTestSetup { fn new() -> Self { let temp_dir = new_temp_dir(); let scar_path_env_var = temp_dir.path().to_string_lossy().into_owned(); let scar = MOCK_SCAR_FILENAME.into(); Self { temp_dir, set_contract: true, manifest: Some( serde_yaml::to_vec(&get_mock_smart_contract_metadata()) .expect("failed to serialize manifest"), ), scar_path_env_var: Some(scar_path_env_var), scar, } } fn set_contract(mut self, set_contract: bool) -> Self { self.set_contract = set_contract; self } fn with_manifest<T: Serialize>(mut self, manifest: Option<T>) -> Self { self.manifest = manifest.map(|manifest| { serde_yaml::to_vec(&manifest).expect("failed to serialize manifest") }); self } fn with_scar_path_env_var(mut self, scar_path_env_var: Option<String>) -> Self { self.scar_path_env_var = scar_path_env_var; self } fn with_scar_from_file(mut self) -> Self { self.scar = self .temp_dir .path() .join(MOCK_SCAR_FILENAME) .to_string_lossy() .into_owned(); self } fn with_scar_without_extension(mut self) -> Self { self.scar = MOCK_SCAR_FILENAME .split(".") .next() .expect("failed to get stem from mock scar filename") .into(); self } fn with_scar(mut self, scar: String) -> Self { self.scar = scar; self } fn build(self) -> SetupHandle { match self.scar_path_env_var { Some(scar_path_env_var) => std::env::set_var(SCAR_PATH_ENV_VAR, scar_path_env_var), None => std::env::remove_var(SCAR_PATH_ENV_VAR), } add_mock_scar_to_dir(self.temp_dir.path(), self.manifest, self.set_contract); SetupHandle { _temp_dir: self.temp_dir, scar: self.scar, } } } /// This handle is used to keep the temp directory open (since it is removed when dropped) and /// to provide the value of the `scar` argument for testing. struct SetupHandle { _temp_dir: TempDir, scar: String, } fn new_temp_dir() -> TempDir { let thread_id = format!("{:?}", std::thread::current().id()); TempDir::new(&thread_id).expect("failed to create temp dir") } /// Add a mock .scar file to the given directory, with the given manifest file (as bytes) and /// with or without a mock contract (as specified by `add_contract`). 
fn add_mock_scar_to_dir(dir: &Path, manifest: Option<Vec<u8>>, add_contract: bool) { let scar_file_path = dir.join(MOCK_SCAR_FILENAME); let scar = File::create(scar_file_path.as_path()).expect("failed to create scar file"); let mut scar_builder = Builder::new(BzEncoder::new(scar, Compression::Default)); if let Some(manifest) = manifest { let manifest_file_path = dir.join(MANIFEST_FILENAME); let mut manifest_file = File::create(manifest_file_path.as_path()).expect("failed to create manifest file"); manifest_file .write_all(manifest.as_slice()) .expect("failed to write manifest file"); scar_builder .append_path_with_name(manifest_file_path, MANIFEST_FILENAME) .expect("failed to add manifest to scar file"); } if add_contract { let contract_file_path = dir.join(MOCK_CONTRACT_FILENAME); let mut contract_file = File::create(contract_file_path.as_path()).expect("failed to create contract file"); contract_file .write_all(MOCK_CONTRACT_BYTES) .expect("failed to write contract file"); scar_builder .append_path_with_name(contract_file_path, MOCK_CONTRACT_FILENAME) .expect("failed to add contract to scar file"); } scar_builder.finish().expect("failed to write scar file"); } fn get_mock_smart_contract_metadata() -> SabreSmartContractMetadata { SabreSmartContractMetadata { name: "mock".into(), version: "1.0".into(), inputs: vec!["abcdef".into()], outputs: vec!["012345".into()], } } }
path_is_manifest
text.ts
import { XmlAttributeComponent, XmlComponent } from "file/xml-components"; class TextAttributes extends XmlAttributeComponent<{ readonly space: SpaceType }> { protected readonly xmlKeys = { space: "xml:space" }; } export class Text extends XmlComponent { constructor(text: string) { super("w:t"); this.root.push(new TextAttributes({ space: SpaceType.PRESERVE })); this.root.push(text); } }
import { SpaceType } from "file/space-type";
syscall.rs
// SPDX-License-Identifier: Apache-2.0 use super::Execute; use crate::{item, Result};
use core::arch::asm; use libc::c_long; struct Syscall<'a, const ARGS: usize, const RETS: usize> { /// The syscall number for the request. /// /// See, for example, [`libc::SYS_exit`](libc::SYS_exit). num: c_long, /// The syscall argument vector. argv: [usize; ARGS], /// Return values. ret: [&'a mut usize; RETS], } impl Execute for Syscall<'_, 0, 1> { #[inline] unsafe fn execute(self) { asm!( "syscall", inlateout("rax") self.num as usize => *self.ret[0], lateout("rcx") _, // clobbered lateout("r11") _, // clobbered ) } } impl Execute for Syscall<'_, 1, 1> { #[inline] unsafe fn execute(self) { asm!( "syscall", inlateout("rax") self.num as usize => *self.ret[0], in("rdi") self.argv[0], lateout("rcx") _, // clobbered lateout("r11") _, // clobbered ) } } impl Execute for Syscall<'_, 2, 1> { #[inline] unsafe fn execute(self) { asm!( "syscall", inlateout("rax") self.num as usize => *self.ret[0], in("rdi") self.argv[0], in("rsi") self.argv[1], lateout("rcx") _, // clobbered lateout("r11") _, // clobbered ) } } impl Execute for Syscall<'_, 3, 1> { #[inline] unsafe fn execute(self) { asm!( "syscall", inlateout("rax") self.num as usize => *self.ret[0], in("rdi") self.argv[0], in("rsi") self.argv[1], in("rdx") self.argv[2], lateout("rcx") _, // clobbered lateout("r11") _, // clobbered ) } } impl Execute for Syscall<'_, 4, 1> { #[inline] unsafe fn execute(self) { asm!( "syscall", inlateout("rax") self.num as usize => *self.ret[0], in("rdi") self.argv[0], in("rsi") self.argv[1], in("rdx") self.argv[2], in("r10") self.argv[3], lateout("rcx") _, // clobbered lateout("r11") _, // clobbered ) } } impl Execute for Syscall<'_, 5, 1> { #[inline] unsafe fn execute(self) { asm!( "syscall", inlateout("rax") self.num as usize => *self.ret[0], in("rdi") self.argv[0], in("rsi") self.argv[1], in("rdx") self.argv[2], in("r10") self.argv[3], in("r8") self.argv[4], lateout("rcx") _, // clobbered lateout("r11") _, // clobbered ) } } impl Execute for Syscall<'_, 6, 1> { #[inline] unsafe fn execute(self) { asm!( "syscall", inlateout("rax") self.num as usize => *self.ret[0], in("rdi") self.argv[0], in("rsi") self.argv[1], in("rdx") self.argv[2], in("r10") self.argv[3], in("r8") self.argv[4], in("r9") self.argv[5], lateout("rcx") _, // clobbered lateout("r11") _, // clobbered ) } } pub(super) unsafe fn execute_syscall(syscall: &mut item::Syscall, data: &mut [u8]) -> Result<()> { match syscall { item::Syscall { num, argv: [fd, ..], ret: [ret, ..], } if *num == libc::SYS_close as _ => Syscall { num: libc::SYS_close, argv: [*fd], ret: [ret], } .execute(), item::Syscall { num, argv: [status, ..], ret: [ret, ..], } if *num == libc::SYS_exit as _ => Syscall { num: libc::SYS_exit, argv: [*status], ret: [ret], } .execute(), item::Syscall { num, argv: [fd, cmd, arg, ..], ret: [ret, ..], } if *num == libc::SYS_fcntl as _ => Syscall { num: libc::SYS_fcntl, argv: [*fd, *cmd, *arg], ret: [ret], } .execute(), item::Syscall { num, argv: [fd, buf_offset, count, ..], ret: [ret, ..], } if *num == libc::SYS_read as _ => { if *count > data.len() || data.len() - *count < *buf_offset { return Err(libc::EFAULT); } Syscall { num: libc::SYS_read, argv: [*fd, data[*buf_offset..].as_ptr() as _, *count], ret: [ret], } .execute(); } item::Syscall { num, argv: _, ret: [ret, ..], } if *num == libc::SYS_sync as _ => Syscall { num: libc::SYS_sync, argv: [], ret: [ret], } .execute(), item::Syscall { num, argv: [fd, buf_offset, count, ..], ret: [ret, ..], } if *num == libc::SYS_write as _ => { if *count > data.len() || data.len() - *count < *buf_offset { 
return Err(libc::EFAULT); } Syscall { num: libc::SYS_write, argv: [*fd, data[*buf_offset..].as_ptr() as _, *count], ret: [ret], } .execute(); } _ => return Err(libc::ENOSYS), } Ok(()) }
wkt.py
# Copyright 2013 Lars Butler & individual contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tokenize

try:
    import StringIO
except ImportError:
    import io
    StringIO = io

INVALID_WKT_FMT = 'Invalid WKT: `%s`'


def dump(obj, dest_file):
    """
    Dump GeoJSON-like `dict` to WKT and write it to the `dest_file`.

    :param dict obj:
        A GeoJSON-like dictionary. It must contain at least the keys 'type'
        and 'coordinates'.
    :param dest_file:
        Open and writable file-like object.
    """
    dest_file.write(dumps(obj))


def load(source_file):
    """
    Load a GeoJSON `dict` object from a ``source_file`` containing WKT.

    :param source_file:
        Open and readable file-like object.

    :returns:
        A GeoJSON `dict` representing the geometry read from the file.
    """
    return loads(source_file.read())


def dumps(obj, decimals=16):
    """
    Dump a GeoJSON-like `dict` to a WKT string.
    """
    geom_type = obj['type']
    exporter = _dumps_registry.get(geom_type)

    if exporter is None:
        _unsupported_geom_type(geom_type)

    fmt = '%%.%df' % decimals
    return exporter(obj, fmt)


def loads(string):
    """
    Construct a GeoJSON `dict` from WKT (`string`).
    """
    sio = StringIO.StringIO(string)
    # NOTE: This is not the intended purpose of `tokenize`, but it works.
    tokens = (x[1] for x in tokenize.generate_tokens(sio.readline))
    tokens = _tokenize_wkt(tokens)
    geom_type = next(tokens)

    importer = _loads_registry.get(geom_type)

    if importer is None:
        _unsupported_geom_type(geom_type)
    return importer(tokens, string)


def _tokenize_wkt(tokens):
    """
    Since the tokenizer treats "-" and numeric strings as separate values,
    combine them and yield them as a single token. This utility encapsulates
    parsing of negative numeric values from WKT so that it can be used
    generically in all parsers.
    """
    negative = False
    for t in tokens:
        if t == '-':
            negative = True
            continue
        else:
            if negative:
                yield '-%s' % t
            else:
                yield t
            negative = False


def _unsupported_geom_type(geom_type):
    raise ValueError("Unsupported geometry type '%s'" % geom_type)


def _dump_point(obj, fmt):
    """
    Dump a GeoJSON-like Point object to WKT.

    :param dict obj:
        A GeoJSON-like `dict` representing a Point.
    :param str fmt:
        Format string which indicates the number of digits to display after
        the decimal point when formatting coordinates.

    :returns:
        WKT representation of the input GeoJSON Point ``obj``.
    """
    coords = obj['coordinates']
    pt = 'POINT (%s)' % ' '.join(fmt % c for c in coords)
    return pt


def _dump_linestring(obj, fmt):
    """
    Dump a GeoJSON-like LineString object to WKT.

    Input parameters and return value are the LINESTRING equivalent to
    :func:`_dump_point`.
    """
    coords = obj['coordinates']
    ls = 'LINESTRING (%s)'
    ls %= ', '.join(' '.join(fmt % c for c in pt) for pt in coords)
    return ls


def _dump_polygon(obj, fmt):
    """
    Dump a GeoJSON-like Polygon object to WKT.

    Input parameters and return value are the POLYGON equivalent to
    :func:`_dump_point`.
""" coords = obj['coordinates'] poly = 'POLYGON (%s)' rings = (', '.join(' '.join(fmt % c for c in pt) for pt in ring) for ring in coords) rings = ('(%s)' % r for r in rings) poly %= ', '.join(rings) return poly def _dump_multipoint(obj, fmt): """ Dump a GeoJSON-like MultiPoint object to WKT. Input parameters and return value are the MULTIPOINT equivalent to :func:`_dump_point`. """ coords = obj['coordinates'] mp = 'MULTIPOINT (%s)' points = (' '.join(fmt % c for c in pt) for pt in coords) # Add parens around each point. points = ('(%s)' % pt for pt in points) mp %= ', '.join(points) return mp def _dump_multilinestring(obj, fmt): """ Dump a GeoJSON-like MultiLineString object to WKT. Input parameters and return value are the MULTILINESTRING equivalent to :func:`_dump_point`. """ coords = obj['coordinates'] mlls = 'MULTILINESTRING (%s)' linestrs = ('(%s)' % ', '.join(' '.join(fmt % c for c in pt) for pt in linestr) for linestr in coords) mlls %= ', '.join(ls for ls in linestrs) return mlls def _dump_multipolygon(obj, fmt): """ Dump a GeoJSON-like MultiPolygon object to WKT. Input parameters and return value are the MULTIPOLYGON equivalent to :func:`_dump_point`. """ coords = obj['coordinates'] mp = 'MULTIPOLYGON (%s)' polys = ( # join the polygons in the multipolygon ', '.join( # join the rings in a polygon, # and wrap in parens '(%s)' % ', '.join( # join the points in a ring, # and wrap in parens '(%s)' % ', '.join( # join coordinate values of a vertex ' '.join(fmt % c for c in pt) for pt in ring) for ring in poly) for poly in coords) ) mp %= polys return mp def _dump_geometrycollection(obj, fmt): """ Dump a GeoJSON-like GeometryCollection object to WKT. Input parameters and return value are the GEOMETRYCOLLECTION equivalent to :func:`_dump_point`. The WKT conversions for each geometry in the collection are delegated to their respective functions. """ gc = 'GEOMETRYCOLLECTION (%s)' geoms = obj['geometries'] geoms_wkt = [] for geom in geoms: geom_type = geom['type'] geoms_wkt.append(_dumps_registry.get(geom_type)(geom, fmt)) gc %= ','.join(geoms_wkt) return gc def _load_point(tokens, string): """ :param tokens: A generator of string tokens for the input WKT, begining just after the geometry type. The geometry type is consumed before we get to here. For example, if :func:`loads` is called with the input 'POINT(0.0 1.0)', ``tokens`` would generate the following values: .. code-block:: python ['(', '0.0', '1.0', ')'] :param str string: The original WKT string. :returns: A GeoJSON `dict` Point representation of the WKT ``string``. """ if not next(tokens) == '(': raise ValueError(INVALID_WKT_FMT % string) coords = [] try: for t in tokens: if t == ')': break else: coords.append(float(t)) except tokenize.TokenError: raise ValueError(INVALID_WKT_FMT % string) return dict(type='Point', coordinates=coords) def _load_linestring(tokens, string): """ Has similar inputs and return value to to :func:`_load_point`, except is for handling LINESTRING geometry. :returns: A GeoJSON `dict` LineString representation of the WKT ``string``. 
""" if not next(tokens) == '(': raise ValueError(INVALID_WKT_FMT % string) # a list of lists # each member list represents a point coords = [] try: pt = [] for t in tokens: if t == ')': coords.append(pt) break elif t == ',': # it's the end of the point coords.append(pt) pt = [] else: pt.append(float(t)) except tokenize.TokenError: raise ValueError(INVALID_WKT_FMT % string) return dict(type='LineString', coordinates=coords) def _load_polygon(tokens, string): """ Has similar inputs and return value to to :func:`_load_point`, except is for handling POLYGON geometry. :returns: A GeoJSON `dict` Polygon representation of the WKT ``string``. """ open_parens = next(tokens), next(tokens) if not open_parens == ('(', '('): raise ValueError(INVALID_WKT_FMT % string) # coords contains a list of rings # each ring contains a list of points # each point is a list of 2-4 values coords = [] ring = [] on_ring = True try: pt = [] for t in tokens: if t == ')' and on_ring: # The ring is finished ring.append(pt) coords.append(ring) on_ring = False elif t == ')' and not on_ring: # it's the end of the polygon break elif t == '(': # it's a new ring ring = [] pt = [] on_ring = True elif t == ',' and on_ring: # it's the end of a point ring.append(pt) pt = [] elif t == ',' and not on_ring: # there's another ring. # do nothing pass else: pt.append(float(t)) except tokenize.TokenError: raise ValueError(INVALID_WKT_FMT % string) return dict(type='Polygon', coordinates=coords) def _load_multipoint(tokens, string): """ Has similar inputs and return value to to :func:`_load_point`, except is for handling MULTIPOINT geometry. :returns: A GeoJSON `dict` MultiPoint representation of the WKT ``string``. """ open_paren = next(tokens) if not open_paren == '(': raise ValueError(INVALID_WKT_FMT % string) coords = [] pt = [] paren_depth = 1 try: for t in tokens: if t == '(': paren_depth += 1 elif t == ')': paren_depth -= 1 if paren_depth == 0: break elif t == '': pass elif t == ',': # the point is done coords.append(pt) pt = [] else: pt.append(float(t)) except tokenize.TokenError: raise ValueError(INVALID_WKT_FMT % string) # Given the way we're parsing, we'll probably have to deal with the last # point after the loop if len(pt) > 0: coords.append(pt) return dict(type='MultiPoint', coordinates=coords)
def _load_multipolygon(tokens, string):
    """
    Has similar inputs and return value to :func:`_load_point`, except it
    handles MULTIPOLYGON geometry.

    :returns:
        A GeoJSON `dict` MultiPolygon representation of the WKT ``string``.
    """
    open_paren = next(tokens)
    if not open_paren == '(':
        raise ValueError(INVALID_WKT_FMT % string)

    polygons = []
    while True:
        try:
            poly = _load_polygon(tokens, string)
            polygons.append(poly['coordinates'])
            t = next(tokens)
            if t == ')':
                # we're done; no more polygons.
                break
        except StopIteration:
            # If we reach this, the WKT is not valid.
            raise ValueError(INVALID_WKT_FMT % string)

    return dict(type='MultiPolygon', coordinates=polygons)


def _load_multilinestring(tokens, string):
    """
    Has similar inputs and return value to :func:`_load_point`, except it
    handles MULTILINESTRING geometry.

    :returns:
        A GeoJSON `dict` MultiLineString representation of the WKT
        ``string``.
    """
    open_paren = next(tokens)
    if not open_paren == '(':
        raise ValueError(INVALID_WKT_FMT % string)

    linestrs = []
    while True:
        try:
            linestr = _load_linestring(tokens, string)
            linestrs.append(linestr['coordinates'])
            t = next(tokens)
            if t == ')':
                # we're done; no more linestrings.
                break
        except StopIteration:
            # If we reach this, the WKT is not valid.
            raise ValueError(INVALID_WKT_FMT % string)

    return dict(type='MultiLineString', coordinates=linestrs)


def _load_geometrycollection(tokens, string):
    """
    Has similar inputs and return value to :func:`_load_point`, except it
    handles GEOMETRYCOLLECTIONs.

    Delegates parsing to the parsers for the individual geometry types.

    :returns:
        A GeoJSON `dict` GeometryCollection representation of the WKT
        ``string``.
    """
    open_paren = next(tokens)
    if not open_paren == '(':
        raise ValueError(INVALID_WKT_FMT % string)

    geoms = []
    result = dict(type='GeometryCollection', geometries=geoms)
    while True:
        try:
            t = next(tokens)
            if t == ')':
                break
            elif t == ',':
                # another geometry still
                continue
            else:
                geom_type = t
                load_func = _loads_registry.get(geom_type)
                geom = load_func(tokens, string)
                geoms.append(geom)
        except StopIteration:
            raise ValueError(INVALID_WKT_FMT % string)
    return result


_dumps_registry = {
    'Point': _dump_point,
    'LineString': _dump_linestring,
    'Polygon': _dump_polygon,
    'MultiPoint': _dump_multipoint,
    'MultiLineString': _dump_multilinestring,
    'MultiPolygon': _dump_multipolygon,
    'GeometryCollection': _dump_geometrycollection,
}

_loads_registry = {
    'POINT': _load_point,
    'LINESTRING': _load_linestring,
    'POLYGON': _load_polygon,
    'MULTIPOINT': _load_multipoint,
    'MULTILINESTRING': _load_multilinestring,
    'MULTIPOLYGON': _load_multipolygon,
    'GEOMETRYCOLLECTION': _load_geometrycollection,
}
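Taken together, `dumps`/`loads` and the registries above round-trip between GeoJSON-like dicts and WKT strings. A minimal usage sketch built only from the public functions defined in this module (the `decimals` override just keeps the literal short):

```python
from io import StringIO

# Serialize a GeoJSON-like dict to WKT, limiting coordinate precision.
point = {'type': 'Point', 'coordinates': [0.0, 1.0]}
wkt = dumps(point, decimals=1)  # 'POINT (0.0 1.0)'

# Parse WKT back into a GeoJSON-like dict.
assert loads(wkt) == {'type': 'Point', 'coordinates': [0.0, 1.0]}

# The file-based helpers wrap the same pair of functions.
buf = StringIO()
dump(point, buf)
assert load(StringIO(buf.getvalue()))['type'] == 'Point'
```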
localeChanged.d.ts
declare const EVENT_BUS: import("vue/types/vue").CombinedVueInstance<Vue, object, object, object, Record<never, any>>; export declare function localeChanged(): void; export { EVENT_BUS };
import Vue from 'vue';
test_forms.py
from django.test import TestCase from core.govdelivery import MockGovDelivery from data_research.forms import ConferenceRegistrationForm from data_research.models import ConferenceRegistration class ConferenceRegistrationFormTests(TestCase): capacity = 100 govdelivery_code = 'TEST-CODE' govdelivery_question_id = '12345' govdelivery_answer_id = '67890' def test_invalid_form_if_fields_are_missing(self): form = ConferenceRegistrationForm( capacity=self.capacity, govdelivery_code=self.govdelivery_code, govdelivery_question_id=self.govdelivery_question_id, govdelivery_answer_id=self.govdelivery_answer_id, data={'foo': 'bar'} ) self.assertFalse(form.is_valid()) def get_valid_form( self, attendee_type=ConferenceRegistrationForm.ATTENDEE_IN_PERSON, govdelivery_question_id=None, govdelivery_answer_id=None ): return ConferenceRegistrationForm( capacity=self.capacity, govdelivery_code=self.govdelivery_code, govdelivery_question_id=govdelivery_question_id, govdelivery_answer_id=govdelivery_answer_id, data={ 'attendee_type': attendee_type, 'name': 'A User', 'organization': 'An Organization', 'email': '[email protected]', } ) def test_valid_form_if_required_fields_are_provided(self): form = self.get_valid_form() self.assertTrue(form.is_valid()) def test_form_save_commit_false_doesnt_save_user(self): form = self.get_valid_form() form.is_valid() form.save(commit=False) self.assertFalse(ConferenceRegistration.objects.exists()) def test_form_save_commit_false_doesnt_subscribe_to_govdelivery(self):
def test_form_save_sets_registration_code_and_details(self): form = self.get_valid_form() form.is_valid() registrant = form.save(commit=False) self.assertEqual(registrant.govdelivery_code, 'TEST-CODE') self.assertEqual(registrant.details, { 'attendee_type': ConferenceRegistrationForm.ATTENDEE_IN_PERSON, 'name': 'A User', 'organization': 'An Organization', 'email': '[email protected]', 'dietary_restrictions': [], 'other_dietary_restrictions': '', 'accommodations': [], 'other_accommodations': '', }) def test_form_save_commit_true_saves_to_db(self): form = self.get_valid_form() form.is_valid() registrant = form.save() self.assertEqual(registrant, ConferenceRegistration.objects.first()) def test_form_save_commit_true_subscribes_to_gd(self): form = self.get_valid_form() form.is_valid() form.save() self.assertEqual( MockGovDelivery.calls, [( 'set_subscriber_topics', (), { 'contact_details': '[email protected]', 'topic_codes': ['TEST-CODE'], 'send_notifications': True, } )] ) def test_form_save_commit_true_subscribes_and_sets_question(self): form = self.get_valid_form( govdelivery_question_id='12345', govdelivery_answer_id='67890' ) form.is_valid() form.save() self.assertEqual(MockGovDelivery.calls, [ ( 'set_subscriber_topics', (), { 'contact_details': '[email protected]', 'topic_codes': ['TEST-CODE'], 'send_notifications': True, } ), ( 'set_subscriber_answer_to_select_question', (), { 'contact_details': '[email protected]', 'question_id': '12345', 'answer_id': '67890', } ), ]) def make_capacity_registrants(self, govdelivery_code, attendee_type): registrant = ConferenceRegistration( govdelivery_code=govdelivery_code, details={'attendee_type': attendee_type} ) ConferenceRegistration.objects.bulk_create( [registrant] * self.capacity ) def test_form_not_at_capacity(self): self.assertFalse(self.get_valid_form().at_capacity) def test_form_at_capacity(self): self.make_capacity_registrants( self.govdelivery_code, ConferenceRegistrationForm.ATTENDEE_IN_PERSON ) self.assertTrue(self.get_valid_form().at_capacity) def test_form_at_capacity_for_some_other_code(self): self.make_capacity_registrants( 'some-other-code', ConferenceRegistrationForm.ATTENDEE_IN_PERSON ) self.assertFalse(self.get_valid_form().at_capacity) def test_form_at_capacity_invalid(self): self.make_capacity_registrants( self.govdelivery_code, ConferenceRegistrationForm.ATTENDEE_IN_PERSON ) form = self.get_valid_form() self.assertFalse(form.is_valid()) def test_form_at_capacity_still_valid_for_virtual_attendees(self): self.make_capacity_registrants( self.govdelivery_code, ConferenceRegistrationForm.ATTENDEE_IN_PERSON ) form = self.get_valid_form( attendee_type=ConferenceRegistrationForm.ATTENDEE_VIRTUALLY ) self.assertTrue(form.is_valid()) def test_form_virtual_attendees_dont_count_against_capacity(self): self.make_capacity_registrants( self.govdelivery_code, ConferenceRegistrationForm.ATTENDEE_VIRTUALLY ) self.assertFalse(self.get_valid_form().at_capacity)
calls_before = list(MockGovDelivery.calls) form = self.get_valid_form() form.is_valid() form.save(commit=False) self.assertEqual(MockGovDelivery.calls, calls_before)
experience.js
////////////////////////////////////////////////// //// site's data and initial setup ////////////////////////////////////////////////// var timelineTemplate = ` <div class="timeline"> <a href="{2}" class="timeline-content"> <div class="timeline-icon"> <i class="fa"></i> </div> <h3 class="title">{0} | {1}</h3> <p class="description"> {3} - {4} </p> </a> </div> `;
timeline = document.getElementsByClassName("main-timeline")[0]; displayData(experience); }; ////////////////////////////////////////////////// //// working with tabs ////////////////////////////////////////////////// const displayData = (experience) => { for (const data of experience) { addTab(data); } }; const addTab = ({ name, position, link, from, to}) => { timeline.innerHTML += timelineTemplate.format(name, position, link, from, to); };
var timeline; window.onload = () => {
for_loops.py
# Write your solution for 1.1 here!
x = 0
for i in range(101):
    x += i
print(x)
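The loop accumulates 0 + 1 + ... + 100, which is 5050. A one-line sanity check against Gauss's closed form n(n + 1)/2:

```python
# Closed-form check: the sum of 0..n equals n * (n + 1) // 2.
n = 100
assert sum(range(n + 1)) == n * (n + 1) // 2 == 5050
```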
plugin.py
import re import pickle import tempfile import pytest from _pytest.config import Config from _pytest._io.terminalwriter import TerminalWriter from _pytest.reports import TestReport from pytest_fold.tui_pytermtk import main as tuitk from pytest_fold.tui_textual1 import main as tuitxt1 from pytest_fold.tui_textual2 import main as tuitxt2 from pytest_fold.utils import ( test_session_starts_matcher, errors_section_matcher, failures_section_matcher, warnings_summary_matcher, passes_section_matcher, short_test_summary_matcher, lastline_matcher, MARKERS, REPORTFILE, MARKEDTERMINALOUTPUTFILE, UNMARKEDTERMINALOUTPUTFILE, ) # Don't collect tests from any of these files collect_ignore = [ "setup.py", "plugin.py", ] # A list of TestReport objects generated by Pytest during test run. # Each TestReport represents a single test's operation during one of # Pytest's three phases: setup | call | teardown reports = [] def pytest_addoption(parser):
def pytest_report_teststatus(report: TestReport, config: Config):
    """Construct list(s) of individual TestReport instances"""
    reports.append(report)


@pytest.hookimpl(trylast=True)
def pytest_configure(config: Config) -> None:
    """
    Write console output to a file for use by TUI
    This code works by looking at every line sent by Pytest to the terminal,
    and based on its category, marking or not marking it
    """
    config.option.verbose = (
        1  # force verbose mode for easier parsing of final test results
    )
    config.option.reportchars = (
        "A"  # force "display all" mode so all results can be shown
    )

    if config.option.fold:
        tr = config.pluginmanager.getplugin("terminalreporter")
        if tr is not None:
            # identify and mark the very first line of terminal output
            try:
                config._pyfoldfirsttime
            except AttributeError:
                config._pyfoldfirsttime = True

            config._pyfold_unmarked_outputfile = tempfile.TemporaryFile("wb+")
            config._pyfold_marked_outputfile = tempfile.TemporaryFile("wb+")
            oldwrite = tr._tw.write

            # identify and mark each results section
            def tee_write(s, **kwargs):
                if re.search(test_session_starts_matcher, s):
                    config._pyfold_marked_outputfile.write(
                        (MARKERS["pytest_fold_test_session_starts"] + "\n").encode(
                            "utf-8"
                        )
                    )
                if re.search(errors_section_matcher, s):
                    config._pyfold_marked_outputfile.write(
                        (MARKERS["pytest_fold_errors_section"] + "\n").encode("utf-8")
                    )
                if re.search(failures_section_matcher, s):
                    config._pyfold_marked_outputfile.write(
                        (MARKERS["pytest_fold_failures_section"] + "\n").encode("utf-8")
                    )
                if re.search(warnings_summary_matcher, s):
                    config._pyfold_marked_outputfile.write(
                        (MARKERS["pytest_fold_warnings_summary"] + "\n").encode("utf-8")
                    )
                if re.search(passes_section_matcher, s):
                    config._pyfold_marked_outputfile.write(
                        (MARKERS["pytest_fold_passes_section"] + "\n").encode("utf-8")
                    )
                if re.search(short_test_summary_matcher, s):
                    config._pyfold_marked_outputfile.write(
                        (MARKERS["pytest_fold_short_test_summary"] + "\n").encode(
                            "utf-8"
                        )
                    )
                if re.search(lastline_matcher, s):
                    config._pyfold_marked_outputfile.write(
                        (MARKERS["pytest_fold_last_line"] + "\n").encode("utf-8")
                    )

                # Write this line's text along with its markup info to console
                oldwrite(s, **kwargs)

                # Mark up this line's text by passing it to an instance of TerminalWriter's
                # 'markup' method. Do not pass "flush" to the method or it will throw an error.
                s1 = s
                kwargs.pop("flush") if "flush" in kwargs.keys() else None
                s1 = TerminalWriter().markup(s, **kwargs)

                # Encode the marked up line so it can be written to the config object.
                # The Pytest config object can be used by plugins for conveying stateful
                # info across an entire test run session.
if isinstance(s1, str): marked_up = s1.encode("utf-8") config._pyfold_marked_outputfile.write(marked_up) # Write this line's original (unmarked) text to unmarked file s_orig = s kwargs.pop("flush") if "flush" in kwargs.keys() else None s_orig = TerminalWriter().markup(s, **kwargs) if isinstance(s_orig, str): unmarked_up = s_orig.encode("utf-8") config._pyfold_unmarked_outputfile.write(unmarked_up) # Write to both terminal/console and tempfiles: # _pyfold_marked_outputfile, _pyfold_unmarked_outputfile tr._tw.write = tee_write def pytest_unconfigure(config: Config): """ Write terminal and test results info to files for use by TUI """ # Write terminal output to file if hasattr(config, "_pyfold_marked_outputfile"): # get terminal contents, then write file config._pyfold_marked_outputfile.seek(0) markedsessionlog = config._pyfold_marked_outputfile.read() config._pyfold_marked_outputfile.close() if hasattr(config, "_pyfold_unmarked_outputfile"): # get terminal contents, then write file config._pyfold_unmarked_outputfile.seek(0) unmarkedsessionlog = config._pyfold_unmarked_outputfile.read() config._pyfold_unmarked_outputfile.close() # Undo our patching in the terminal reporter config.pluginmanager.getplugin("terminalreporter") # Write marked-up results to file with open(MARKEDTERMINALOUTPUTFILE, "wb") as marked_file: marked_file.write(markedsessionlog) # Write un-marked-up results to file with open(UNMARKEDTERMINALOUTPUTFILE, "wb") as unmarked_file: unmarked_file.write(unmarkedsessionlog) # Write the reports list to file with open(REPORTFILE, "wb") as report_file: pickle.dump(reports, report_file) # Launch the TUI if config.getoption("--fold") == True: pyfold_tui(config) def pyfold_tui(config: Config) -> None: """ Final code invocation after Pytest run has completed. This method calls the Pyfold TUI to display final results. """ # disable capturing while TUI runs to avoid error `redirected stdin is pseudofile, has # no fileno()`; adapted from https://githubmemory.com/repo/jsbueno/terminedia/issues/25 if not config.getoption("--fold"): return capmanager = config.pluginmanager.getplugin("capturemanager") try: capmanager.suspend_global_capture(in_=True) finally: if config.getoption("--ft") in ["k", "pytermtk"]: tuitk() elif config.getoption("--ft") in ["t1", "textual1"]: tuitxt1() elif config.getoption("--ft") in ["t2", "textual2"]: tuitxt2() elif config.getoption("--ft") not in ["n", "none"]: print(f"Incorrect choice for fold-tui: {config.getoption('--ft')}") capmanager.resume_global_capture()
"""Define the plugin's option flags as presented by Pytest""" group = parser.getgroup("fold") group.addoption( "--fold", action="store_true", help="fold failed test output sections", ) group.addoption( "--fold-tui", "--ft", action="store", default="pytermtk", help="specify user interface ('pytermtk' ' k' | 'textual1' 't1' | 'textual2' 't2' | 'none' 'n')", choices=["pytermtk", "k", "textual1", "t1", "textual2", "t2", "none", "n"], )
path_manager.py
# Copyright 2019 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import os.path import posixpath import web_idl from . import name_style from .blink_v8_bridge import blink_class_name class PathManager(object): """ Provides a variety of paths such as Blink headers and output files. Unless explicitly specified, returned paths are relative to the project's root directory or the root directory of generated files, e.g. "third_party/blink/renderer/..." Relative paths are represented in POSIX style so that it fits nicely in generated code, e.g. #include "third_party/blink/renderer/...", while absolute paths are represented in a platform-specific style so that it works well with a platform-specific notion, e.g. a drive letter in Windows path such as "C:\\chromium\\src\\...". About output files, there are two cases. - cross-components case: APIs are generated in 'core' and implementations are generated in 'modules'. - single component case: Everything is generated in a single component. """ _REQUIRE_INIT_MESSAGE = ("PathManager.init must be called in advance.") _is_initialized = False @classmethod def init(cls, root_src_dir, root_gen_dir, component_reldirs): """ Args: root_src_dir: Project's root directory, which corresponds to "//" in GN. root_gen_dir: Root directory of generated files, which corresponds to "//out/Default/gen" in GN. component_reldirs: Pairs of component and output directory relative to |root_gen_dir|. """ assert not cls._is_initialized assert isinstance(root_src_dir, str) assert isinstance(root_gen_dir, str) assert isinstance(component_reldirs, dict) cls._blink_path_prefix = posixpath.sep + posixpath.join( "third_party", "blink", "renderer", "") cls._root_src_dir = os.path.abspath(root_src_dir) cls._root_gen_dir = os.path.abspath(root_gen_dir) cls._component_reldirs = { component: posixpath.normpath(rel_dir) for component, rel_dir in component_reldirs.items() } cls._is_initialized = True @classmethod def component_path(cls, component, filepath): """ Returns the relative path to |filepath| in |component|'s directory. """ assert cls._is_initialized, cls._REQUIRE_INIT_MESSAGE return posixpath.join(cls._component_reldirs[component], filepath) @classmethod def gen_path_to(cls, path): """ Returns the absolute path of |path| that must be relative to the root directory of generated files. """ assert cls._is_initialized, cls._REQUIRE_INIT_MESSAGE return os.path.abspath(os.path.join(cls._root_gen_dir, path)) @classmethod def src_path_to(cls, path): """ Returns the absolute path of |path| that must be relative to the project root directory. """ assert cls._is_initialized, cls._REQUIRE_INIT_MESSAGE return os.path.abspath(os.path.join(cls._root_src_dir, path)) def __init__(self, idl_definition): assert self._is_initialized, self._REQUIRE_INIT_MESSAGE components = sorted(idl_definition.components) # "core" < "modules" if len(components) == 0: assert isinstance(idl_definition, (web_idl.Union, web_idl.NewUnion)) # Unions of built-in types, e.g. (double or DOMString), do not have # a component. 
            self._is_cross_components = False
            default_component = web_idl.Component("core")
            self._api_component = default_component
            self._impl_component = default_component
        elif len(components) == 1:
            component = components[0]
            self._is_cross_components = False
            self._api_component = component
            self._impl_component = component
        elif len(components) == 2:
            assert components[0] == "core"
            assert components[1] == "modules"
            self._is_cross_components = True
            # Union does not support cross-component code generation because
            # clients of an IDL union must be on the same or an upper layer
            # relative to all of the union's members.
            if isinstance(idl_definition, (web_idl.Union, web_idl.NewUnion)):
                self._api_component = components[1]
            else:
                self._api_component = components[0]
            self._impl_component = components[1]
        else:
            assert False

        self._api_dir = self._component_reldirs[self._api_component]
        self._impl_dir = self._component_reldirs[self._impl_component]
        self._api_basename = name_style.file("v8", idl_definition.identifier)
        self._impl_basename = name_style.file("v8", idl_definition.identifier)

        if isinstance(idl_definition, web_idl.NewUnion):
            # In case of IDL unions, underscore is used as a separator of union
            # members, so we don't want any underscore inside a union member.
            # For example, (Foo or Bar or Baz) and (FooBar or Baz) are defined
            # in v8_union_foo_bar_baz.ext and v8_union_foobar_baz.ext
            # respectively.
            #
            # Avoid name_style.file so that "Int32Array" is not turned into
            # "int_32_array".
            filename = "v8_union_{}".format("_".join(
                idl_definition.member_tokens)).lower()
            self._api_basename = filename
            self._impl_basename = filename
        elif isinstance(idl_definition, web_idl.Union):
            union_class_name = idl_definition.identifier
            union_filepath = _BACKWARD_COMPATIBLE_UNION_FILEPATHS.get(
                union_class_name, union_class_name)
            self._api_basename = name_style.file(union_filepath)
            self._impl_basename = name_style.file(union_filepath)

        if isinstance(idl_definition, (web_idl.Union, web_idl.NewUnion)):
            self._blink_dir = None
            self._blink_basename = None
        else:
            idl_path = idl_definition.debug_info.location.filepath
            self._blink_dir = posixpath.dirname(idl_path)
            self._blink_basename = name_style.file(
                blink_class_name(idl_definition))

    @property
    def is_cross_components(self):
        return self._is_cross_components

    @property
    def api_component(self):
@property def api_dir(self): return self._api_dir def api_path(self, filename=None, ext=None): return self._join( dirpath=self.api_dir, filename=(filename or self._api_basename), ext=ext) @property def impl_component(self): return self._impl_component @property def impl_dir(self): return self._impl_dir def impl_path(self, filename=None, ext=None): return self._join( dirpath=self.impl_dir, filename=(filename or self._impl_basename), ext=ext) @property def blink_dir(self): return self._blink_dir def blink_path(self, filename=None, ext=None): return self._join( dirpath=self.blink_dir, filename=(filename or self._blink_basename), ext=ext) @staticmethod def _join(dirpath, filename, ext=None): if ext is not None: filename = posixpath.extsep.join([filename, ext]) return posixpath.join(dirpath, filename) # A hack to make the filepaths to generated IDL unions compatible with the old # bindings generator. # # Copied from |shorten_union_name| defined in # //third_party/blink/renderer/bindings/scripts/utilities.py _BACKWARD_COMPATIBLE_UNION_FILEPATHS = { # modules/canvas2d/CanvasRenderingContext2D.idl "CSSImageValueOrHTMLImageElementOrSVGImageElementOrHTMLVideoElementOrHTMLCanvasElementOrImageBitmapOrOffscreenCanvasOrVideoFrame": "CanvasImageSource", # modules/canvas/htmlcanvas/html_canvas_element_module.idl "CanvasRenderingContext2DOrWebGLRenderingContextOrWebGL2RenderingContextOrImageBitmapRenderingContextOrGPUCanvasContext": "RenderingContext", # core/frame/window_or_worker_global_scope.idl "HTMLImageElementOrSVGImageElementOrHTMLVideoElementOrHTMLCanvasElementOrBlobOrImageDataOrImageBitmapOrOffscreenCanvasOrVideoFrame": "ImageBitmapSource", # bindings/tests/idls/core/TestTypedefs.idl "NodeOrLongSequenceOrEventOrXMLHttpRequestOrStringOrStringByteStringOrNodeListRecord": "NestedUnionType", # modules/canvas/offscreencanvas/offscreen_canvas_module.idl "OffscreenCanvasRenderingContext2DOrWebGLRenderingContextOrWebGL2RenderingContextOrImageBitmapRenderingContext": "OffscreenRenderingContext", # core/xmlhttprequest/xml_http_request.idl "DocumentOrBlobOrArrayBufferOrArrayBufferViewOrFormDataOrURLSearchParamsOrUSVString": "DocumentOrXMLHttpRequestBodyInit", # modules/beacon/navigator_beacon.idl 'ReadableStreamOrBlobOrArrayBufferOrArrayBufferViewOrFormDataOrURLSearchParamsOrUSVString': 'ReadableStreamOrXMLHttpRequestBodyInit', # modules/mediasource/source_buffer.idl 'EncodedAudioChunkOrEncodedVideoChunkSequenceOrEncodedAudioChunkOrEncodedVideoChunk': 'EncodedAVChunkSequenceOrEncodedAVChunk', }
return self._api_component
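For reference, the intended call sequence is `PathManager.init` once with the build-system-derived directories, then path lookups via the classmethods or per-definition instances. A sketch with hypothetical directory values (the real ones come from GN):

```python
# Hypothetical setup; in practice these values are supplied by the build system.
PathManager.init(
    root_src_dir="/chromium/src",
    root_gen_dir="/chromium/src/out/Default/gen",
    component_reldirs={
        "core": "third_party/blink/renderer/bindings/core",
        "modules": "third_party/blink/renderer/bindings/modules",
    })

# Classmethod lookups then resolve against those roots.
PathManager.component_path("core", "v8_node.h")
# -> "third_party/blink/renderer/bindings/core/v8_node.h" (POSIX style)
PathManager.gen_path_to("foo/bar.h")
# -> absolute, platform-specific path under the generated-files root
```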
personGroupOperations.js
/* * Copyright (c) Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See License.txt in the project root for * license information. * * Code generated by Microsoft (R) AutoRest Code Generator. * Changes may cause incorrect behavior and will be lost if the code is * regenerated. */ 'use strict'; const msRest = require('ms-rest'); const WebResource = msRest.WebResource; /** * Create a new person group with specified personGroupId, name and * user-provided userData. * * @param {string} personGroupId Id referencing a particular person group. * * @param {object} [options] Optional Parameters. * * @param {string} [options.name] User defined name, maximum length is 128. * * @param {string} [options.userData] User specified data. Length should not * exceed 16KB. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @param {function} callback - The callback. * * @returns {function} callback(err, result, request, response) * * {Error} err - The Error object if an error occurred, null otherwise. * * {null} [result] - The deserialized result object if an error did not occur. * * {object} [request] - The HTTP Request object if an error did not occur. * * {stream} [response] - The HTTP Response stream if an error did not occur. */ function _create(personGroupId, options, callback) { /* jshint validthis: true */ let client = this.client; if(!callback && typeof options === 'function') { callback = options; options = null; } if (!callback) { throw new Error('callback cannot be null.'); } let name = (options && options.name !== undefined) ? options.name : undefined; let userData = (options && options.userData !== undefined) ? options.userData : undefined; // Validate try { if (this.client.endpoint === null || this.client.endpoint === undefined || typeof this.client.endpoint.valueOf() !== 'string') { throw new Error('this.client.endpoint cannot be null or undefined and it must be of type string.'); } if (personGroupId === null || personGroupId === undefined || typeof personGroupId.valueOf() !== 'string') { throw new Error('personGroupId cannot be null or undefined and it must be of type string.'); } if (personGroupId !== null && personGroupId !== undefined) { if (personGroupId.length > 64) { throw new Error('"personGroupId" should satisfy the constraint - "MaxLength": 64'); } if (personGroupId.match(/^[a-z0-9-_]+$/) === null) { throw new Error('"personGroupId" should satisfy the constraint - "Pattern": /^[a-z0-9-_]+$/'); } } if (name !== null && name !== undefined && typeof name.valueOf() !== 'string') { throw new Error('name must be of type string.'); } if (name !== null && name !== undefined) { if (name.length > 128) { throw new Error('"name" should satisfy the constraint - "MaxLength": 128'); } } if (userData !== null && userData !== undefined && typeof userData.valueOf() !== 'string') { throw new Error('userData must be of type string.'); } if (userData !== null && userData !== undefined) { if (userData.length > 16384) { throw new Error('"userData" should satisfy the constraint - "MaxLength": 16384'); } } } catch (error) { return callback(error); } let body; if ((name !== null && name !== undefined) || (userData !== null && userData !== undefined)) { body = new client.models['NameAndUserDataContract'](); body.name = name; body.userData = userData; } // Construct URL let baseUrl = this.client.baseUri; let requestUrl = baseUrl + (baseUrl.endsWith('/') ? 
'' : '/') + 'persongroups/{personGroupId}'; requestUrl = requestUrl.replace('{Endpoint}', this.client.endpoint); requestUrl = requestUrl.replace('{personGroupId}', encodeURIComponent(personGroupId)); // Create HTTP transport objects let httpRequest = new WebResource(); httpRequest.method = 'PUT'; httpRequest.url = requestUrl; httpRequest.headers = {}; // Set Headers httpRequest.headers['Content-Type'] = 'application/json; charset=utf-8'; if(options) { for(let headerName in options['customHeaders']) { if (options['customHeaders'].hasOwnProperty(headerName)) { httpRequest.headers[headerName] = options['customHeaders'][headerName]; } } } // Serialize Request let requestContent = null; let requestModel = null; try { if (body !== null && body !== undefined) { let requestModelMapper = new client.models['NameAndUserDataContract']().mapper(); requestModel = client.serialize(requestModelMapper, body, 'body'); requestContent = JSON.stringify(requestModel); } } catch (error) { let serializationError = new Error(`Error "${error.message}" occurred in serializing the ` + `payload - ${JSON.stringify(body, null, 2)}.`); return callback(serializationError); } httpRequest.body = requestContent; // Send Request return client.pipeline(httpRequest, (err, response, responseBody) => { if (err) { return callback(err); } let statusCode = response.statusCode; if (statusCode !== 200) { let error = new Error(responseBody); error.statusCode = response.statusCode; error.request = msRest.stripRequest(httpRequest); error.response = msRest.stripResponse(response); if (responseBody === '') responseBody = null; let parsedErrorResponse; try { parsedErrorResponse = JSON.parse(responseBody); if (parsedErrorResponse) { let internalError = null; if (parsedErrorResponse.error) internalError = parsedErrorResponse.error; error.code = internalError ? internalError.code : parsedErrorResponse.code; error.message = internalError ? internalError.message : parsedErrorResponse.message; } if (parsedErrorResponse !== null && parsedErrorResponse !== undefined) { let resultMapper = new client.models['APIError']().mapper(); error.body = client.deserialize(resultMapper, parsedErrorResponse, 'error.body'); } } catch (defaultError) { error.message = `Error "${defaultError.message}" occurred in deserializing the responseBody ` + `- "${responseBody}" for the default response.`; return callback(error); } return callback(error); } // Create Result let result = null; if (responseBody === '') responseBody = null; return callback(null, result, httpRequest, response); }); } /** * Delete an existing person group. Persisted face features of all people in * the person group will also be deleted. * * @param {string} personGroupId Id referencing a particular person group. * * @param {object} [options] Optional Parameters. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @param {function} callback - The callback. * * @returns {function} callback(err, result, request, response) * * {Error} err - The Error object if an error occurred, null otherwise. * * {null} [result] - The deserialized result object if an error did not occur. * * {object} [request] - The HTTP Request object if an error did not occur. * * {stream} [response] - The HTTP Response stream if an error did not occur. 
*/ function _deleteMethod(personGroupId, options, callback) { /* jshint validthis: true */ let client = this.client; if(!callback && typeof options === 'function') { callback = options; options = null; } if (!callback) { throw new Error('callback cannot be null.'); } // Validate try { if (this.client.endpoint === null || this.client.endpoint === undefined || typeof this.client.endpoint.valueOf() !== 'string') { throw new Error('this.client.endpoint cannot be null or undefined and it must be of type string.'); } if (personGroupId === null || personGroupId === undefined || typeof personGroupId.valueOf() !== 'string') { throw new Error('personGroupId cannot be null or undefined and it must be of type string.'); } if (personGroupId !== null && personGroupId !== undefined) { if (personGroupId.length > 64) { throw new Error('"personGroupId" should satisfy the constraint - "MaxLength": 64'); } if (personGroupId.match(/^[a-z0-9-_]+$/) === null) { throw new Error('"personGroupId" should satisfy the constraint - "Pattern": /^[a-z0-9-_]+$/'); } } } catch (error) { return callback(error); } // Construct URL let baseUrl = this.client.baseUri; let requestUrl = baseUrl + (baseUrl.endsWith('/') ? '' : '/') + 'persongroups/{personGroupId}'; requestUrl = requestUrl.replace('{Endpoint}', this.client.endpoint); requestUrl = requestUrl.replace('{personGroupId}', encodeURIComponent(personGroupId)); // Create HTTP transport objects let httpRequest = new WebResource(); httpRequest.method = 'DELETE'; httpRequest.url = requestUrl; httpRequest.headers = {}; // Set Headers httpRequest.headers['Content-Type'] = 'application/json; charset=utf-8'; if(options) { for(let headerName in options['customHeaders']) { if (options['customHeaders'].hasOwnProperty(headerName)) { httpRequest.headers[headerName] = options['customHeaders'][headerName]; } } } httpRequest.body = null; // Send Request return client.pipeline(httpRequest, (err, response, responseBody) => { if (err) { return callback(err); } let statusCode = response.statusCode; if (statusCode !== 200) { let error = new Error(responseBody); error.statusCode = response.statusCode; error.request = msRest.stripRequest(httpRequest); error.response = msRest.stripResponse(response); if (responseBody === '') responseBody = null; let parsedErrorResponse; try { parsedErrorResponse = JSON.parse(responseBody); if (parsedErrorResponse) { let internalError = null; if (parsedErrorResponse.error) internalError = parsedErrorResponse.error; error.code = internalError ? internalError.code : parsedErrorResponse.code; error.message = internalError ? internalError.message : parsedErrorResponse.message; } if (parsedErrorResponse !== null && parsedErrorResponse !== undefined) { let resultMapper = new client.models['APIError']().mapper(); error.body = client.deserialize(resultMapper, parsedErrorResponse, 'error.body'); } } catch (defaultError) { error.message = `Error "${defaultError.message}" occurred in deserializing the responseBody ` + `- "${responseBody}" for the default response.`; return callback(error); } return callback(error); } // Create Result let result = null; if (responseBody === '') responseBody = null; return callback(null, result, httpRequest, response); }); } /** * Retrieve the information of a person group, including its name and userData. * * @param {string} personGroupId Id referencing a particular person group. * * @param {object} [options] Optional Parameters. 
* * @param {object} [options.customHeaders] Headers that will be added to the * request * * @param {function} callback - The callback. * * @returns {function} callback(err, result, request, response) * * {Error} err - The Error object if an error occurred, null otherwise. * * {object} [result] - The deserialized result object if an error did not occur. * See {@link PersonGroup} for more information. * * {object} [request] - The HTTP Request object if an error did not occur. * * {stream} [response] - The HTTP Response stream if an error did not occur. */ function _get(personGroupId, options, callback) { /* jshint validthis: true */ let client = this.client; if(!callback && typeof options === 'function') { callback = options; options = null; } if (!callback) { throw new Error('callback cannot be null.'); } // Validate try { if (this.client.endpoint === null || this.client.endpoint === undefined || typeof this.client.endpoint.valueOf() !== 'string') { throw new Error('this.client.endpoint cannot be null or undefined and it must be of type string.'); } if (personGroupId === null || personGroupId === undefined || typeof personGroupId.valueOf() !== 'string') { throw new Error('personGroupId cannot be null or undefined and it must be of type string.'); } if (personGroupId !== null && personGroupId !== undefined) { if (personGroupId.length > 64) { throw new Error('"personGroupId" should satisfy the constraint - "MaxLength": 64'); } if (personGroupId.match(/^[a-z0-9-_]+$/) === null) { throw new Error('"personGroupId" should satisfy the constraint - "Pattern": /^[a-z0-9-_]+$/'); } } } catch (error) { return callback(error); } // Construct URL let baseUrl = this.client.baseUri; let requestUrl = baseUrl + (baseUrl.endsWith('/') ? '' : '/') + 'persongroups/{personGroupId}'; requestUrl = requestUrl.replace('{Endpoint}', this.client.endpoint); requestUrl = requestUrl.replace('{personGroupId}', encodeURIComponent(personGroupId)); // Create HTTP transport objects let httpRequest = new WebResource(); httpRequest.method = 'GET'; httpRequest.url = requestUrl; httpRequest.headers = {}; // Set Headers httpRequest.headers['Content-Type'] = 'application/json; charset=utf-8'; if(options) { for(let headerName in options['customHeaders']) { if (options['customHeaders'].hasOwnProperty(headerName)) { httpRequest.headers[headerName] = options['customHeaders'][headerName]; } } } httpRequest.body = null; // Send Request return client.pipeline(httpRequest, (err, response, responseBody) => { if (err) { return callback(err); } let statusCode = response.statusCode; if (statusCode !== 200) { let error = new Error(responseBody); error.statusCode = response.statusCode; error.request = msRest.stripRequest(httpRequest); error.response = msRest.stripResponse(response); if (responseBody === '') responseBody = null; let parsedErrorResponse; try { parsedErrorResponse = JSON.parse(responseBody); if (parsedErrorResponse) { let internalError = null; if (parsedErrorResponse.error) internalError = parsedErrorResponse.error; error.code = internalError ? internalError.code : parsedErrorResponse.code; error.message = internalError ? 
internalError.message : parsedErrorResponse.message;
        }
        if (parsedErrorResponse !== null && parsedErrorResponse !== undefined) {
          let resultMapper = new client.models['APIError']().mapper();
          error.body = client.deserialize(resultMapper, parsedErrorResponse, 'error.body');
        }
      } catch (defaultError) {
        error.message = `Error "${defaultError.message}" occurred in deserializing the responseBody ` +
                        `- "${responseBody}" for the default response.`;
        return callback(error);
      }
      return callback(error);
    }
    // Create Result
    let result = null;
    if (responseBody === '') responseBody = null;
    // Deserialize Response
    if (statusCode === 200) {
      let parsedResponse = null;
      try {
        parsedResponse = JSON.parse(responseBody);
        result = JSON.parse(responseBody);
        if (parsedResponse !== null && parsedResponse !== undefined) {
          let resultMapper = new client.models['PersonGroup']().mapper();
          result = client.deserialize(resultMapper, parsedResponse, 'result');
        }
      } catch (error) {
        let deserializationError = new Error(`Error ${error} occurred in deserializing the responseBody - ${responseBody}`);
        deserializationError.request = msRest.stripRequest(httpRequest);
        deserializationError.response = msRest.stripResponse(response);
        return callback(deserializationError);
      }
    }

    return callback(null, result, httpRequest, response);
  });
}

/**
 * Update an existing person group's display name and userData. Properties
 * that do not appear in the request body will not be updated.
 *
 * @param {string} personGroupId Id referencing a particular person group.
 *
 * @param {object} [options] Optional Parameters.
 *
 * @param {string} [options.name] User defined name, maximum length is 128.
 *
 * @param {string} [options.userData] User specified data. Length should not
 * exceed 16KB.
 *
 * @param {object} [options.customHeaders] Headers that will be added to the
 * request
 *
 * @param {function} callback - The callback.
 *
 * @returns {function} callback(err, result, request, response)
 *
 * {Error} err - The Error object if an error occurred, null otherwise.
 *
 * {null} [result] - The deserialized result object if an error did not occur.
 *
 * {object} [request] - The HTTP Request object if an error did not occur.
 *
 * {stream} [response] - The HTTP Response stream if an error did not occur.
 */
function _update(personGroupId, options, callback) {
   /* jshint validthis: true */
  let client = this.client;
  if(!callback && typeof options === 'function') {
    callback = options;
    options = null;
  }
  if (!callback) {
    throw new Error('callback cannot be null.');
  }
  let name = (options && options.name !== undefined) ? options.name : undefined;
  let userData = (options && options.userData !== undefined) ?
options.userData : undefined; // Validate try { if (this.client.endpoint === null || this.client.endpoint === undefined || typeof this.client.endpoint.valueOf() !== 'string') { throw new Error('this.client.endpoint cannot be null or undefined and it must be of type string.'); } if (personGroupId === null || personGroupId === undefined || typeof personGroupId.valueOf() !== 'string') { throw new Error('personGroupId cannot be null or undefined and it must be of type string.'); } if (personGroupId !== null && personGroupId !== undefined) { if (personGroupId.length > 64) { throw new Error('"personGroupId" should satisfy the constraint - "MaxLength": 64'); } if (personGroupId.match(/^[a-z0-9-_]+$/) === null) { throw new Error('"personGroupId" should satisfy the constraint - "Pattern": /^[a-z0-9-_]+$/'); } } if (name !== null && name !== undefined && typeof name.valueOf() !== 'string') { throw new Error('name must be of type string.'); } if (name !== null && name !== undefined) { if (name.length > 128) { throw new Error('"name" should satisfy the constraint - "MaxLength": 128'); } } if (userData !== null && userData !== undefined && typeof userData.valueOf() !== 'string') { throw new Error('userData must be of type string.'); } if (userData !== null && userData !== undefined) { if (userData.length > 16384) { throw new Error('"userData" should satisfy the constraint - "MaxLength": 16384'); } } } catch (error) { return callback(error); } let body; if ((name !== null && name !== undefined) || (userData !== null && userData !== undefined)) { body = new client.models['NameAndUserDataContract'](); body.name = name; body.userData = userData; } // Construct URL let baseUrl = this.client.baseUri; let requestUrl = baseUrl + (baseUrl.endsWith('/') ? '' : '/') + 'persongroups/{personGroupId}'; requestUrl = requestUrl.replace('{Endpoint}', this.client.endpoint); requestUrl = requestUrl.replace('{personGroupId}', encodeURIComponent(personGroupId)); // Create HTTP transport objects let httpRequest = new WebResource(); httpRequest.method = 'PATCH'; httpRequest.url = requestUrl; httpRequest.headers = {}; // Set Headers httpRequest.headers['Content-Type'] = 'application/json; charset=utf-8'; if(options) { for(let headerName in options['customHeaders']) { if (options['customHeaders'].hasOwnProperty(headerName)) { httpRequest.headers[headerName] = options['customHeaders'][headerName]; } } } // Serialize Request let requestContent = null; let requestModel = null; try { if (body !== null && body !== undefined) { let requestModelMapper = new client.models['NameAndUserDataContract']().mapper(); requestModel = client.serialize(requestModelMapper, body, 'body'); requestContent = JSON.stringify(requestModel); } } catch (error) { let serializationError = new Error(`Error "${error.message}" occurred in serializing the ` + `payload - ${JSON.stringify(body, null, 2)}.`); return callback(serializationError); } httpRequest.body = requestContent; // Send Request return client.pipeline(httpRequest, (err, response, responseBody) => { if (err) { return callback(err); } let statusCode = response.statusCode; if (statusCode !== 200) { let error = new Error(responseBody); error.statusCode = response.statusCode; error.request = msRest.stripRequest(httpRequest); error.response = msRest.stripResponse(response); if (responseBody === '') responseBody = null; let parsedErrorResponse; try { parsedErrorResponse = JSON.parse(responseBody); if (parsedErrorResponse) { let internalError = null; if (parsedErrorResponse.error) internalError = 
parsedErrorResponse.error; error.code = internalError ? internalError.code : parsedErrorResponse.code; error.message = internalError ? internalError.message : parsedErrorResponse.message; } if (parsedErrorResponse !== null && parsedErrorResponse !== undefined) { let resultMapper = new client.models['APIError']().mapper(); error.body = client.deserialize(resultMapper, parsedErrorResponse, 'error.body'); } } catch (defaultError) { error.message = `Error "${defaultError.message}" occurred in deserializing the responseBody ` + `- "${responseBody}" for the default response.`; return callback(error); } return callback(error); } // Create Result let result = null; if (responseBody === '') responseBody = null; return callback(null, result, httpRequest, response); }); } /** * Retrieve the training status of a person group (completed or ongoing). * * @param {string} personGroupId Id referencing a particular person group. * * @param {object} [options] Optional Parameters. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @param {function} callback - The callback. * * @returns {function} callback(err, result, request, response) * * {Error} err - The Error object if an error occurred, null otherwise. * * {object} [result] - The deserialized result object if an error did not occur. * See {@link TrainingStatus} for more information. * * {object} [request] - The HTTP Request object if an error did not occur. * * {stream} [response] - The HTTP Response stream if an error did not occur. */ function _getTrainingStatus(personGroupId, options, callback) { /* jshint validthis: true */ let client = this.client; if(!callback && typeof options === 'function') { callback = options; options = null; } if (!callback) { throw new Error('callback cannot be null.'); } // Validate try { if (this.client.endpoint === null || this.client.endpoint === undefined || typeof this.client.endpoint.valueOf() !== 'string') { throw new Error('this.client.endpoint cannot be null or undefined and it must be of type string.'); } if (personGroupId === null || personGroupId === undefined || typeof personGroupId.valueOf() !== 'string') { throw new Error('personGroupId cannot be null or undefined and it must be of type string.'); } if (personGroupId !== null && personGroupId !== undefined) { if (personGroupId.length > 64) { throw new Error('"personGroupId" should satisfy the constraint - "MaxLength": 64'); } if (personGroupId.match(/^[a-z0-9-_]+$/) === null) { throw new Error('"personGroupId" should satisfy the constraint - "Pattern": /^[a-z0-9-_]+$/'); } } } catch (error) { return callback(error); } // Construct URL let baseUrl = this.client.baseUri; let requestUrl = baseUrl + (baseUrl.endsWith('/') ? 
'' : '/') + 'persongroups/{personGroupId}/training'; requestUrl = requestUrl.replace('{Endpoint}', this.client.endpoint); requestUrl = requestUrl.replace('{personGroupId}', encodeURIComponent(personGroupId)); // Create HTTP transport objects let httpRequest = new WebResource(); httpRequest.method = 'GET'; httpRequest.url = requestUrl; httpRequest.headers = {}; // Set Headers httpRequest.headers['Content-Type'] = 'application/json; charset=utf-8'; if(options) { for(let headerName in options['customHeaders']) { if (options['customHeaders'].hasOwnProperty(headerName)) { httpRequest.headers[headerName] = options['customHeaders'][headerName]; } } } httpRequest.body = null; // Send Request return client.pipeline(httpRequest, (err, response, responseBody) => { if (err) { return callback(err); } let statusCode = response.statusCode; if (statusCode !== 200) { let error = new Error(responseBody); error.statusCode = response.statusCode; error.request = msRest.stripRequest(httpRequest); error.response = msRest.stripResponse(response); if (responseBody === '') responseBody = null; let parsedErrorResponse; try { parsedErrorResponse = JSON.parse(responseBody); if (parsedErrorResponse) { let internalError = null; if (parsedErrorResponse.error) internalError = parsedErrorResponse.error; error.code = internalError ? internalError.code : parsedErrorResponse.code; error.message = internalError ? internalError.message : parsedErrorResponse.message; } if (parsedErrorResponse !== null && parsedErrorResponse !== undefined) { let resultMapper = new client.models['APIError']().mapper(); error.body = client.deserialize(resultMapper, parsedErrorResponse, 'error.body'); } } catch (defaultError) { error.message = `Error "${defaultError.message}" occurred in deserializing the responseBody ` + `- "${responseBody}" for the default response.`; return callback(error); } return callback(error); } // Create Result let result = null; if (responseBody === '') responseBody = null; // Deserialize Response if (statusCode === 200) { let parsedResponse = null; try { parsedResponse = JSON.parse(responseBody); result = JSON.parse(responseBody); if (parsedResponse !== null && parsedResponse !== undefined) { let resultMapper = new client.models['TrainingStatus']().mapper(); result = client.deserialize(resultMapper, parsedResponse, 'result'); } } catch (error) { let deserializationError = new Error(`Error ${error} occurred in deserializing the responseBody - ${responseBody}`); deserializationError.request = msRest.stripRequest(httpRequest); deserializationError.response = msRest.stripResponse(response); return callback(deserializationError); } } return callback(null, result, httpRequest, response); }); } /** * List person groups and their information. * * @param {object} [options] Optional Parameters. * * @param {string} [options.start] List person groups from the least * personGroupId greater than the "start". * * @param {number} [options.top] The number of person groups to list. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @param {function} callback - The callback. * * @returns {function} callback(err, result, request, response) * * {Error} err - The Error object if an error occurred, null otherwise. * * {array} [result] - The deserialized result object if an error did not occur. * * {object} [request] - The HTTP Request object if an error did not occur. * * {stream} [response] - The HTTP Response stream if an error did not occur. 
*/ function _list(options, callback) { /* jshint validthis: true */ let client = this.client; if(!callback && typeof options === 'function') { callback = options; options = null; } if (!callback) { throw new Error('callback cannot be null.'); } let start = (options && options.start !== undefined) ? options.start : undefined; let top = (options && options.top !== undefined) ? options.top : 1000; // Validate try { if (this.client.endpoint === null || this.client.endpoint === undefined || typeof this.client.endpoint.valueOf() !== 'string') { throw new Error('this.client.endpoint cannot be null or undefined and it must be of type string.'); } if (start !== null && start !== undefined && typeof start.valueOf() !== 'string') { throw new Error('start must be of type string.'); } if (start !== null && start !== undefined) { if (start.length > 64) { throw new Error('"start" should satisfy the constraint - "MaxLength": 64'); } } if (top !== null && top !== undefined && typeof top !== 'number') { throw new Error('top must be of type number.'); } if (top !== null && top !== undefined) { if (top > 1000) { throw new Error('"top" should satisfy the constraint - "InclusiveMaximum": 1000'); } if (top < 1) { throw new Error('"top" should satisfy the constraint - "InclusiveMinimum": 1'); } } } catch (error) { return callback(error); } // Construct URL let baseUrl = this.client.baseUri; let requestUrl = baseUrl + (baseUrl.endsWith('/') ? '' : '/') + 'persongroups'; requestUrl = requestUrl.replace('{Endpoint}', this.client.endpoint); let queryParameters = []; if (start !== null && start !== undefined) { queryParameters.push('start=' + encodeURIComponent(start)); } if (top !== null && top !== undefined) { queryParameters.push('top=' + encodeURIComponent(top.toString())); } if (queryParameters.length > 0) { requestUrl += '?' + queryParameters.join('&'); } // Create HTTP transport objects let httpRequest = new WebResource(); httpRequest.method = 'GET'; httpRequest.url = requestUrl; httpRequest.headers = {}; // Set Headers httpRequest.headers['Content-Type'] = 'application/json; charset=utf-8'; if(options) { for(let headerName in options['customHeaders']) { if (options['customHeaders'].hasOwnProperty(headerName)) { httpRequest.headers[headerName] = options['customHeaders'][headerName]; } } } httpRequest.body = null; // Send Request return client.pipeline(httpRequest, (err, response, responseBody) => { if (err) { return callback(err); } let statusCode = response.statusCode; if (statusCode !== 200) { let error = new Error(responseBody); error.statusCode = response.statusCode; error.request = msRest.stripRequest(httpRequest); error.response = msRest.stripResponse(response); if (responseBody === '') responseBody = null; let parsedErrorResponse; try { parsedErrorResponse = JSON.parse(responseBody); if (parsedErrorResponse) { let internalError = null; if (parsedErrorResponse.error) internalError = parsedErrorResponse.error; error.code = internalError ? internalError.code : parsedErrorResponse.code; error.message = internalError ? 
internalError.message : parsedErrorResponse.message;
        }
        if (parsedErrorResponse !== null && parsedErrorResponse !== undefined) {
          let resultMapper = new client.models['APIError']().mapper();
          error.body = client.deserialize(resultMapper, parsedErrorResponse, 'error.body');
        }
      } catch (defaultError) {
        error.message = `Error "${defaultError.message}" occurred in deserializing the responseBody ` +
                        `- "${responseBody}" for the default response.`;
        return callback(error);
      }
      return callback(error);
    }
    // Create Result
    let result = null;
    if (responseBody === '') responseBody = null;
    // Deserialize Response
    if (statusCode === 200) {
      let parsedResponse = null;
      try {
        parsedResponse = JSON.parse(responseBody);
        result = JSON.parse(responseBody);
        if (parsedResponse !== null && parsedResponse !== undefined) {
          let resultMapper = {
            required: false,
            serializedName: 'parsedResponse',
            type: {
              name: 'Sequence',
              element: {
                required: false,
                serializedName: 'PersonGroupElementType',
                type: {
                  name: 'Composite',
                  className: 'PersonGroup'
                }
              }
            }
          };
          result = client.deserialize(resultMapper, parsedResponse, 'result');
        }
      } catch (error) {
        let deserializationError = new Error(`Error ${error} occurred in deserializing the responseBody - ${responseBody}`);
        deserializationError.request = msRest.stripRequest(httpRequest);
        deserializationError.response = msRest.stripResponse(response);
        return callback(deserializationError);
      }
    }

    return callback(null, result, httpRequest, response);
  });
}

/**
 * Queue a person group training task; the training task may not be started
 * immediately.
 *
 * @param {string} personGroupId Id referencing a particular person group.
 *
 * @param {object} [options] Optional Parameters.
 *
 * @param {object} [options.customHeaders] Headers that will be added to the
 * request
 *
 * @param {function} callback - The callback.
 *
 * @returns {function} callback(err, result, request, response)
 *
 * {Error} err - The Error object if an error occurred, null otherwise.
 *
 * {null} [result] - The deserialized result object if an error did not occur.
 *
 * {object} [request] - The HTTP Request object if an error did not occur.
 *
 * {stream} [response] - The HTTP Response stream if an error did not occur.
 */
function _train(personGroupId, options, callback) {
   /* jshint validthis: true */
  let client = this.client;
  if(!callback && typeof options === 'function') {
    callback = options;
    options = null;
  }
  if (!callback) {
    throw new Error('callback cannot be null.');
  }
  // Validate
  try {
    if (this.client.endpoint === null || this.client.endpoint === undefined || typeof this.client.endpoint.valueOf() !== 'string') {
      throw new Error('this.client.endpoint cannot be null or undefined and it must be of type string.');
    }
    if (personGroupId === null || personGroupId === undefined || typeof personGroupId.valueOf() !== 'string') {
      throw new Error('personGroupId cannot be null or undefined and it must be of type string.');
    }
    if (personGroupId !== null && personGroupId !== undefined) {
      if (personGroupId.length > 64) {
        throw new Error('"personGroupId" should satisfy the constraint - "MaxLength": 64');
      }
      if (personGroupId.match(/^[a-z0-9-_]+$/) === null) {
        throw new Error('"personGroupId" should satisfy the constraint - "Pattern": /^[a-z0-9-_]+$/');
      }
    }
  } catch (error) {
    return callback(error);
  }

  // Construct URL
  let baseUrl = this.client.baseUri;
  let requestUrl = baseUrl + (baseUrl.endsWith('/') ?
'' : '/') + 'persongroups/{personGroupId}/train'; requestUrl = requestUrl.replace('{Endpoint}', this.client.endpoint); requestUrl = requestUrl.replace('{personGroupId}', encodeURIComponent(personGroupId)); // Create HTTP transport objects let httpRequest = new WebResource(); httpRequest.method = 'POST'; httpRequest.url = requestUrl; httpRequest.headers = {}; // Set Headers httpRequest.headers['Content-Type'] = 'application/json; charset=utf-8'; if(options) { for(let headerName in options['customHeaders']) { if (options['customHeaders'].hasOwnProperty(headerName)) { httpRequest.headers[headerName] = options['customHeaders'][headerName]; } } } httpRequest.body = null; // Send Request return client.pipeline(httpRequest, (err, response, responseBody) => { if (err) { return callback(err); } let statusCode = response.statusCode; if (statusCode !== 202) { let error = new Error(responseBody); error.statusCode = response.statusCode; error.request = msRest.stripRequest(httpRequest); error.response = msRest.stripResponse(response); if (responseBody === '') responseBody = null; let parsedErrorResponse; try { parsedErrorResponse = JSON.parse(responseBody); if (parsedErrorResponse) { let internalError = null; if (parsedErrorResponse.error) internalError = parsedErrorResponse.error; error.code = internalError ? internalError.code : parsedErrorResponse.code; error.message = internalError ? internalError.message : parsedErrorResponse.message; } if (parsedErrorResponse !== null && parsedErrorResponse !== undefined) { let resultMapper = new client.models['APIError']().mapper(); error.body = client.deserialize(resultMapper, parsedErrorResponse, 'error.body'); } } catch (defaultError) { error.message = `Error "${defaultError.message}" occurred in deserializing the responseBody ` + `- "${responseBody}" for the default response.`; return callback(error); } return callback(error); } // Create Result let result = null; if (responseBody === '') responseBody = null; return callback(null, result, httpRequest, response); }); } /** Class representing a PersonGroupOperations. */ class
{ /** * Create a PersonGroupOperations. * @param {FaceClient} client Reference to the service client. */ constructor(client) { this.client = client; this._create = _create; this._deleteMethod = _deleteMethod; this._get = _get; this._update = _update; this._getTrainingStatus = _getTrainingStatus; this._list = _list; this._train = _train; } /** * Create a new person group with specified personGroupId, name and * user-provided userData. * * @param {string} personGroupId Id referencing a particular person group. * * @param {object} [options] Optional Parameters. * * @param {string} [options.name] User defined name, maximum length is 128. * * @param {string} [options.userData] User specified data. Length should not * exceed 16KB. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @returns {Promise} A promise is returned * * @resolve {HttpOperationResponse<null>} - The deserialized result object. * * @reject {Error} - The error object. */ createWithHttpOperationResponse(personGroupId, options) { let client = this.client; let self = this; return new Promise((resolve, reject) => { self._create(personGroupId, options, (err, result, request, response) => { let httpOperationResponse = new msRest.HttpOperationResponse(request, response); httpOperationResponse.body = result; if (err) { reject(err); } else { resolve(httpOperationResponse); } return; }); }); } /** * Create a new person group with specified personGroupId, name and * user-provided userData. * * @param {string} personGroupId Id referencing a particular person group. * * @param {object} [options] Optional Parameters. * * @param {string} [options.name] User defined name, maximum length is 128. * * @param {string} [options.userData] User specified data. Length should not * exceed 16KB. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @param {function} [optionalCallback] - The optional callback. * * @returns {function|Promise} If a callback was passed as the last parameter * then it returns the callback else returns a Promise. * * {Promise} A promise is returned * * @resolve {null} - The deserialized result object. * * @reject {Error} - The error object. * * {function} optionalCallback(err, result, request, response) * * {Error} err - The Error object if an error occurred, null otherwise. * * {null} [result] - The deserialized result object if an error did not occur. * * {object} [request] - The HTTP Request object if an error did not occur. * * {stream} [response] - The HTTP Response stream if an error did not occur. */ create(personGroupId, options, optionalCallback) { let client = this.client; let self = this; if (!optionalCallback && typeof options === 'function') { optionalCallback = options; options = null; } if (!optionalCallback) { return new Promise((resolve, reject) => { self._create(personGroupId, options, (err, result, request, response) => { if (err) { reject(err); } else { resolve(result); } return; }); }); } else { return self._create(personGroupId, options, optionalCallback); } } /** * Delete an existing person group. Persisted face features of all people in * the person group will also be deleted. * * @param {string} personGroupId Id referencing a particular person group. * * @param {object} [options] Optional Parameters. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @returns {Promise} A promise is returned * * @resolve {HttpOperationResponse<null>} - The deserialized result object. 
* * @reject {Error} - The error object. */ deleteMethodWithHttpOperationResponse(personGroupId, options) { let client = this.client; let self = this; return new Promise((resolve, reject) => { self._deleteMethod(personGroupId, options, (err, result, request, response) => { let httpOperationResponse = new msRest.HttpOperationResponse(request, response); httpOperationResponse.body = result; if (err) { reject(err); } else { resolve(httpOperationResponse); } return; }); }); } /** * Delete an existing person group. Persisted face features of all people in * the person group will also be deleted. * * @param {string} personGroupId Id referencing a particular person group. * * @param {object} [options] Optional Parameters. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @param {function} [optionalCallback] - The optional callback. * * @returns {function|Promise} If a callback was passed as the last parameter * then it returns the callback else returns a Promise. * * {Promise} A promise is returned * * @resolve {null} - The deserialized result object. * * @reject {Error} - The error object. * * {function} optionalCallback(err, result, request, response) * * {Error} err - The Error object if an error occurred, null otherwise. * * {null} [result] - The deserialized result object if an error did not occur. * * {object} [request] - The HTTP Request object if an error did not occur. * * {stream} [response] - The HTTP Response stream if an error did not occur. */ deleteMethod(personGroupId, options, optionalCallback) { let client = this.client; let self = this; if (!optionalCallback && typeof options === 'function') { optionalCallback = options; options = null; } if (!optionalCallback) { return new Promise((resolve, reject) => { self._deleteMethod(personGroupId, options, (err, result, request, response) => { if (err) { reject(err); } else { resolve(result); } return; }); }); } else { return self._deleteMethod(personGroupId, options, optionalCallback); } } /** * Retrieve the information of a person group, including its name and userData. * * @param {string} personGroupId Id referencing a particular person group. * * @param {object} [options] Optional Parameters. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @returns {Promise} A promise is returned * * @resolve {HttpOperationResponse<PersonGroup>} - The deserialized result object. * * @reject {Error} - The error object. */ getWithHttpOperationResponse(personGroupId, options) { let client = this.client; let self = this; return new Promise((resolve, reject) => { self._get(personGroupId, options, (err, result, request, response) => { let httpOperationResponse = new msRest.HttpOperationResponse(request, response); httpOperationResponse.body = result; if (err) { reject(err); } else { resolve(httpOperationResponse); } return; }); }); } /** * Retrieve the information of a person group, including its name and userData. * * @param {string} personGroupId Id referencing a particular person group. * * @param {object} [options] Optional Parameters. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @param {function} [optionalCallback] - The optional callback. * * @returns {function|Promise} If a callback was passed as the last parameter * then it returns the callback else returns a Promise. * * {Promise} A promise is returned * * @resolve {PersonGroup} - The deserialized result object. * * @reject {Error} - The error object. 
 *
 * {function} optionalCallback(err, result, request, response)
 *
 * {Error} err - The Error object if an error occurred, null otherwise.
 *
 * {object} [result] - The deserialized result object if an error did not occur.
 * See {@link PersonGroup} for more information.
 *
 * {object} [request] - The HTTP Request object if an error did not occur.
 *
 * {stream} [response] - The HTTP Response stream if an error did not occur.
 */
get(personGroupId, options, optionalCallback) {
  let client = this.client;
  let self = this;
  if (!optionalCallback && typeof options === 'function') {
    optionalCallback = options;
    options = null;
  }
  if (!optionalCallback) {
    return new Promise((resolve, reject) => {
      self._get(personGroupId, options, (err, result, request, response) => {
        if (err) { reject(err); }
        else { resolve(result); }
        return;
      });
    });
  } else {
    return self._get(personGroupId, options, optionalCallback);
  }
}

/**
 * Update an existing person group's display name and userData. Properties
 * that do not appear in the request body will not be updated.
 *
 * @param {string} personGroupId Id referencing a particular person group.
 *
 * @param {object} [options] Optional Parameters.
 *
 * @param {string} [options.name] User defined name, maximum length is 128.
 *
 * @param {string} [options.userData] User specified data. Length should not
 * exceed 16KB.
 *
 * @param {object} [options.customHeaders] Headers that will be added to the
 * request
 *
 * @returns {Promise} A promise is returned
 *
 * @resolve {HttpOperationResponse<null>} - The deserialized result object.
 *
 * @reject {Error} - The error object.
 */
updateWithHttpOperationResponse(personGroupId, options) {
  let client = this.client;
  let self = this;
  return new Promise((resolve, reject) => {
    self._update(personGroupId, options, (err, result, request, response) => {
      let httpOperationResponse = new msRest.HttpOperationResponse(request, response);
      httpOperationResponse.body = result;
      if (err) { reject(err); }
      else { resolve(httpOperationResponse); }
      return;
    });
  });
}

/**
 * Update an existing person group's display name and userData. Properties
 * that do not appear in the request body will not be updated.
 *
 * @param {string} personGroupId Id referencing a particular person group.
 *
 * @param {object} [options] Optional Parameters.
 *
 * @param {string} [options.name] User defined name, maximum length is 128.
 *
 * @param {string} [options.userData] User specified data. Length should not
 * exceed 16KB.
 *
 * @param {object} [options.customHeaders] Headers that will be added to the
 * request
 *
 * @param {function} [optionalCallback] - The optional callback.
 *
 * @returns {function|Promise} If a callback was passed as the last parameter
 * then it returns the callback else returns a Promise.
 *
 * {Promise} A promise is returned
 *
 * @resolve {null} - The deserialized result object.
 *
 * @reject {Error} - The error object.
 *
 * {function} optionalCallback(err, result, request, response)
 *
 * {Error} err - The Error object if an error occurred, null otherwise.
 *
 * {null} [result] - The deserialized result object if an error did not occur.
 *
 * {object} [request] - The HTTP Request object if an error did not occur.
 *
 * {stream} [response] - The HTTP Response stream if an error did not occur.
*/ update(personGroupId, options, optionalCallback) { let client = this.client; let self = this; if (!optionalCallback && typeof options === 'function') { optionalCallback = options; options = null; } if (!optionalCallback) { return new Promise((resolve, reject) => { self._update(personGroupId, options, (err, result, request, response) => { if (err) { reject(err); } else { resolve(result); } return; }); }); } else { return self._update(personGroupId, options, optionalCallback); } } /** * Retrieve the training status of a person group (completed or ongoing). * * @param {string} personGroupId Id referencing a particular person group. * * @param {object} [options] Optional Parameters. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @returns {Promise} A promise is returned * * @resolve {HttpOperationResponse<TrainingStatus>} - The deserialized result object. * * @reject {Error} - The error object. */ getTrainingStatusWithHttpOperationResponse(personGroupId, options) { let client = this.client; let self = this; return new Promise((resolve, reject) => { self._getTrainingStatus(personGroupId, options, (err, result, request, response) => { let httpOperationResponse = new msRest.HttpOperationResponse(request, response); httpOperationResponse.body = result; if (err) { reject(err); } else { resolve(httpOperationResponse); } return; }); }); } /** * Retrieve the training status of a person group (completed or ongoing). * * @param {string} personGroupId Id referencing a particular person group. * * @param {object} [options] Optional Parameters. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @param {function} [optionalCallback] - The optional callback. * * @returns {function|Promise} If a callback was passed as the last parameter * then it returns the callback else returns a Promise. * * {Promise} A promise is returned * * @resolve {TrainingStatus} - The deserialized result object. * * @reject {Error} - The error object. * * {function} optionalCallback(err, result, request, response) * * {Error} err - The Error object if an error occurred, null otherwise. * * {object} [result] - The deserialized result object if an error did not occur. * See {@link TrainingStatus} for more information. * * {object} [request] - The HTTP Request object if an error did not occur. * * {stream} [response] - The HTTP Response stream if an error did not occur. */ getTrainingStatus(personGroupId, options, optionalCallback) { let client = this.client; let self = this; if (!optionalCallback && typeof options === 'function') { optionalCallback = options; options = null; } if (!optionalCallback) { return new Promise((resolve, reject) => { self._getTrainingStatus(personGroupId, options, (err, result, request, response) => { if (err) { reject(err); } else { resolve(result); } return; }); }); } else { return self._getTrainingStatus(personGroupId, options, optionalCallback); } } /** * List person groups and their information. * * @param {object} [options] Optional Parameters. * * @param {string} [options.start] List person groups from the least * personGroupId greater than the "start". * * @param {number} [options.top] The number of person groups to list. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @returns {Promise} A promise is returned * * @resolve {HttpOperationResponse<Array>} - The deserialized result object. * * @reject {Error} - The error object. 
 */
listWithHttpOperationResponse(options) {
  let client = this.client;
  let self = this;
  return new Promise((resolve, reject) => {
    self._list(options, (err, result, request, response) => {
      let httpOperationResponse = new msRest.HttpOperationResponse(request, response);
      httpOperationResponse.body = result;
      if (err) { reject(err); }
      else { resolve(httpOperationResponse); }
      return;
    });
  });
}

/**
 * List person groups and their information.
 *
 * @param {object} [options] Optional Parameters.
 *
 * @param {string} [options.start] List person groups from the least
 * personGroupId greater than the "start".
 *
 * @param {number} [options.top] The number of person groups to list.
 *
 * @param {object} [options.customHeaders] Headers that will be added to the
 * request
 *
 * @param {function} [optionalCallback] - The optional callback.
 *
 * @returns {function|Promise} If a callback was passed as the last parameter
 * then it returns the callback else returns a Promise.
 *
 * {Promise} A promise is returned
 *
 * @resolve {Array} - The deserialized result object.
 *
 * @reject {Error} - The error object.
 *
 * {function} optionalCallback(err, result, request, response)
 *
 * {Error} err - The Error object if an error occurred, null otherwise.
 *
 * {array} [result] - The deserialized result object if an error did not occur.
 *
 * {object} [request] - The HTTP Request object if an error did not occur.
 *
 * {stream} [response] - The HTTP Response stream if an error did not occur.
 */
list(options, optionalCallback) {
  let client = this.client;
  let self = this;
  if (!optionalCallback && typeof options === 'function') {
    optionalCallback = options;
    options = null;
  }
  if (!optionalCallback) {
    return new Promise((resolve, reject) => {
      self._list(options, (err, result, request, response) => {
        if (err) { reject(err); }
        else { resolve(result); }
        return;
      });
    });
  } else {
    return self._list(options, optionalCallback);
  }
}

/**
 * Queue a person group training task; the training task may not be started
 * immediately.
 *
 * @param {string} personGroupId Id referencing a particular person group.
 *
 * @param {object} [options] Optional Parameters.
 *
 * @param {object} [options.customHeaders] Headers that will be added to the
 * request
 *
 * @returns {Promise} A promise is returned
 *
 * @resolve {HttpOperationResponse<null>} - The deserialized result object.
 *
 * @reject {Error} - The error object.
 */
trainWithHttpOperationResponse(personGroupId, options) {
  let client = this.client;
  let self = this;
  return new Promise((resolve, reject) => {
    self._train(personGroupId, options, (err, result, request, response) => {
      let httpOperationResponse = new msRest.HttpOperationResponse(request, response);
      httpOperationResponse.body = result;
      if (err) { reject(err); }
      else { resolve(httpOperationResponse); }
      return;
    });
  });
}

/**
 * Queue a person group training task; the training task may not be started
 * immediately.
 *
 * @param {string} personGroupId Id referencing a particular person group.
 *
 * @param {object} [options] Optional Parameters.
 *
 * @param {object} [options.customHeaders] Headers that will be added to the
 * request
 *
 * @param {function} [optionalCallback] - The optional callback.
 *
 * @returns {function|Promise} If a callback was passed as the last parameter
 * then it returns the callback else returns a Promise.
 *
 * {Promise} A promise is returned
 *
 * @resolve {null} - The deserialized result object.
 *
 * @reject {Error} - The error object.
* * {function} optionalCallback(err, result, request, response) * * {Error} err - The Error object if an error occurred, null otherwise. * * {null} [result] - The deserialized result object if an error did not occur. * * {object} [request] - The HTTP Request object if an error did not occur. * * {stream} [response] - The HTTP Response stream if an error did not occur. */ train(personGroupId, options, optionalCallback) { let client = this.client; let self = this; if (!optionalCallback && typeof options === 'function') { optionalCallback = options; options = null; } if (!optionalCallback) { return new Promise((resolve, reject) => { self._train(personGroupId, options, (err, result, request, response) => { if (err) { reject(err); } else { resolve(result); } return; }); }); } else { return self._train(personGroupId, options, optionalCallback); } } } module.exports = PersonGroupOperations;
PersonGroupOperations
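For orientation, here is a minimal usage sketch of the class above (not part of the generated file); it assumes an already-constructed FaceClient instance named `client`, and the group id and field values are illustrative:

// Hypothetical usage of PersonGroupOperations through its promise-returning overloads.
const ops = new PersonGroupOperations(client);

async function demo() {
  // The id must match /^[a-z0-9-_]+$/ and be at most 64 characters long.
  await ops.create('sample-group', { name: 'Sample Group', userData: 'demo' });
  await ops.train('sample-group');                            // queues training (HTTP 202)
  const status = await ops.getTrainingStatus('sample-group'); // deserialized TrainingStatus model
  console.log(status);
}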
test_cloud_storage_transfer_service.py
# -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import unittest import mock from parameterized import parameterized from airflow.gcp.hooks.cloud_storage_transfer_service import GcpTransferOperationStatus from airflow.gcp.sensors.cloud_storage_transfer_service import CloudDataTransferServiceJobStatusSensor class TestGcpStorageTransferOperationWaitForJobStatusSensor(unittest.TestCase): @mock.patch('airflow.gcp.sensors.cloud_storage_transfer_service.CloudDataTransferServiceHook') def test_wait_for_status_success(self, mock_tool): operations = [{'metadata': {'status': GcpTransferOperationStatus.SUCCESS}}] mock_tool.return_value.list_transfer_operations.return_value = operations mock_tool.operations_contain_expected_statuses.return_value = True op = CloudDataTransferServiceJobStatusSensor( task_id='task-id', job_name='job-name', project_id='project-id', expected_statuses=GcpTransferOperationStatus.SUCCESS, ) context = {'ti': (mock.Mock(**{'xcom_push.return_value': None}))} result = op.poke(context) mock_tool.return_value.list_transfer_operations.assert_called_once_with( request_filter={'project_id': 'project-id', 'job_names': ['job-name']} ) mock_tool.operations_contain_expected_statuses.assert_called_once_with( operations=operations, expected_statuses={GcpTransferOperationStatus.SUCCESS} ) self.assertTrue(result) @mock.patch('airflow.gcp.sensors.cloud_storage_transfer_service.CloudDataTransferServiceHook') def test_wait_for_status_success_default_expected_status(self, mock_tool): op = CloudDataTransferServiceJobStatusSensor( task_id='task-id',
project_id='project-id', expected_statuses=GcpTransferOperationStatus.SUCCESS, ) context = {'ti': (mock.Mock(**{'xcom_push.return_value': None}))} result = op.poke(context) mock_tool.operations_contain_expected_statuses.assert_called_once_with( operations=mock.ANY, expected_statuses={GcpTransferOperationStatus.SUCCESS} ) self.assertTrue(result) @mock.patch('airflow.gcp.sensors.cloud_storage_transfer_service.CloudDataTransferServiceHook') def test_wait_for_status_after_retry(self, mock_tool): operations_set = [ [{'metadata': {'status': GcpTransferOperationStatus.SUCCESS}}], [{'metadata': {'status': GcpTransferOperationStatus.SUCCESS}}], ] mock_tool.return_value.list_transfer_operations.side_effect = operations_set mock_tool.operations_contain_expected_statuses.side_effect = [False, True] op = CloudDataTransferServiceJobStatusSensor( task_id='task-id', job_name='job-name', project_id='project-id', expected_statuses=GcpTransferOperationStatus.SUCCESS, ) context = {'ti': (mock.Mock(**{'xcom_push.return_value': None}))} result = op.poke(context) self.assertFalse(result) mock_tool.operations_contain_expected_statuses.assert_called_once_with( operations=operations_set[0], expected_statuses={GcpTransferOperationStatus.SUCCESS} ) mock_tool.operations_contain_expected_statuses.reset_mock() result = op.poke(context) self.assertTrue(result) mock_tool.operations_contain_expected_statuses.assert_called_once_with( operations=operations_set[1], expected_statuses={GcpTransferOperationStatus.SUCCESS} ) @parameterized.expand( [ (GcpTransferOperationStatus.SUCCESS, {GcpTransferOperationStatus.SUCCESS}), ({GcpTransferOperationStatus.SUCCESS}, {GcpTransferOperationStatus.SUCCESS}), ( {GcpTransferOperationStatus.SUCCESS, GcpTransferOperationStatus.SUCCESS}, {GcpTransferOperationStatus.SUCCESS, GcpTransferOperationStatus.SUCCESS}, ), ] ) @mock.patch('airflow.gcp.sensors.cloud_storage_transfer_service.CloudDataTransferServiceHook') def test_wait_for_status_normalize_status(self, expected_status, received_status, mock_tool): operations = [{'metadata': {'status': GcpTransferOperationStatus.SUCCESS}}] mock_tool.return_value.list_transfer_operations.return_value = operations mock_tool.operations_contain_expected_statuses.side_effect = [False, True] op = CloudDataTransferServiceJobStatusSensor( task_id='task-id', job_name='job-name', project_id='project-id', expected_statuses=expected_status, ) context = {'ti': (mock.Mock(**{'xcom_push.return_value': None}))} result = op.poke(context) self.assertFalse(result) mock_tool.operations_contain_expected_statuses.assert_called_once_with( operations=operations, expected_statuses=received_status )
job_name='job-name',
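For context, a minimal sketch (not part of this test file) of how the sensor under test is typically wired into a DAG; the dag id, schedule, and ids are illustrative:

# Hypothetical DAG wiring for the sensor exercised by the tests above.
from airflow import DAG
from airflow.gcp.hooks.cloud_storage_transfer_service import GcpTransferOperationStatus
from airflow.gcp.sensors.cloud_storage_transfer_service import CloudDataTransferServiceJobStatusSensor
from airflow.utils.dates import days_ago

with DAG('example_wait_for_transfer', start_date=days_ago(1), schedule_interval=None) as dag:
    wait_for_transfer = CloudDataTransferServiceJobStatusSensor(
        task_id='wait-for-transfer',
        job_name='job-name',          # the transfer job to watch
        project_id='project-id',
        expected_statuses={GcpTransferOperationStatus.SUCCESS},
    )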
__init__.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/2/5 0005 8:56 AM
# @Author : Trojx
# @File : __init__.py.py
map.ts
import { defineConfig } from '@kidar/echarts-helper' import * as echarts from 'echarts' import china from './geojson/china.json' import { SERIES_TYPE } from './constant' import citiesIngLat from './asset/json/cities_lng_lat.json' import { isNull, setTitle } from './utils' echarts.registerMap('china', { geoJSON: china as any, specialAreas: { china: { left: 0, top: 0 } } }) declare class CitiesPoints { [key: string]: number[] } declare class LinesItem { coords: Array<Array<any>> lineStyle?: { color?: echarts.Color } } declare class
{
  value: Array<any>
  itemStyle?: {
    color?: echarts.Color
  }
}

interface MapDataItem {
  name: string
  value: number
  lng?: number
  lat?: number
  tos?: MapDataItem[]
}

export default defineConfig({
  resetOption(cols, data: MapDataItem[], ctx) {
    const title = setTitle(ctx)
    let themeColor = ctx.chart.getOption().color
    const colors = Array.isArray(themeColor) ? themeColor : [themeColor]
    const symbolSizeMax = Math.min(ctx.chart.getHeight(), ctx.chart.getWidth()) / 15
    const scatterData: ScatterItem[] = []
    const linesData: LinesItem[] = []
    const cities = citiesIngLat as CitiesPoints
    let max = 0
    let min = 0
    let i = 0
    data.forEach(d => {
      let { name, value, lng, lat, tos } = d
      max = Math.max(max, value)
      min = Math.min(min, value)
      if (isNull(lng) || isNull(lat)) {
        [lng, lat] = cities[name.replace(/(市|县|地区)/g, '')] || []
      }
      let curP = [lng, lat, value, name, d]
      scatterData.push({ value: curP, itemStyle: { color: colors[i] } })

      if (Array.isArray(tos) && tos.length > 0) {
        tos.forEach(t => {
          let [tolng, tolat] = cities[t.name.replace(/(市|县|地区)/g, '')] || []
          let toP = [tolng, tolat, t.value, t.name, t]
          linesData.push({ coords: [curP, toP], lineStyle: { color: colors[i] } })
        })
      }
      i++
    })

    return {
      title,
      legend: { show: false },
      colorBy: 'data',
      tooltip: {
        show: true,
        formatter: (params: any) => {
          switch (params.componentSubType) {
            case 'lines':
              let [from, to] = params.data.coords || []
              return `${from[3]}(${from[2]}) --> ${to[3]}(${to[2]}) <br>${from[2] - to[2]}`
            case 'effectScatter':
              const [lng, lat, value, name] = params.value
              return `${name}: ${value}`
          }
        }
      },
      dataZoom: [
        {
          type: 'inside'
        }
      ],
      geo: {
        roam: true,
        zoom: 1.1,
        aspectScale: 0.75, // aspect ratio of the map
        layoutSize: '100%',
        selectedMode: false,
        map: 'china',
        coordinateSystem: 'geo'
      },
      series: [
        {
          id: ctx.chartId,
          universalTransition: true,
          type: SERIES_TYPE.effectScatter,
          coordinateSystem: 'geo',
          geoIndex: 0,
          symbolSize: (val: Array<number>) => {
            let size = (val[2] / (max * 2 - min)) * symbolSizeMax
            return size
          },
          data: scatterData
        },
        {
          type: 'lines',
          zlevel: 2,
          effect: {
            show: true,
            period: 4, // arrow animation period; smaller values mean faster movement
            trailLength: 0.4, // trail length in [0, 1]; larger values leave a longer trail
            symbol: 'arrow', // arrow icon
            symbolSize: 7, // icon size
          },
          data: linesData
        }
      ]
    }
  }
})
ScatterItem
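A small illustrative data sample (assumed, not from the source) in the MapDataItem shape this config consumes; names without explicit lng/lat are resolved through the bundled cities_lng_lat.json, assuming it contains the stripped city names:

// Hypothetical input for the map config above: point values plus optional flow lines via `tos`.
const demoData: MapDataItem[] = [
  { name: '北京市', value: 120, tos: [{ name: '上海市', value: 80 }] }, // coordinates looked up by name
  { name: '广州市', value: 60, lng: 113.264, lat: 23.129 }             // explicit coordinates
]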
filter_test.py
from otri.filtering.filter import Filter, Stream from unittest.mock import MagicMock import unittest class FilterTest(unittest.TestCase):
def setUp(self): self.s_A = Stream([1, 2, 3]) self.s_B = Stream([3, 4, 5]) self.s_D = Stream() self.s_E = Stream() self.s_F = Stream() self.state = dict() self.f = Filter( inputs=["A", "B"], outputs=["D", "E", "F"], input_count=2, output_count=3 ) self.f.setup([self.s_A, self.s_B], [self.s_D, self.s_E, self.s_F], self.state) def test_filter_input_number_correct(self): self.assertEqual(2, len(self.f.get_input_names())) def test_filter_input_stream_names_equals(self): self.assertEqual(["A", "B"], self.f.get_input_names()) def test_filter_output_number_correct(self): self.assertEqual(3, len(self.f.get_output_names())) def test_filter_output_stream_names_equals(self): self.assertEqual(["D", "E", "F"], self.f.get_output_names()) def test_get_in_streams(self): self.assertEqual(self.s_A, self.f._get_input(0)) self.assertEqual(self.s_B, self.f._get_input(1)) def test_get_out_streams(self): self.assertEqual(self.s_D, self.f._get_output(0)) self.assertEqual(self.s_E, self.f._get_output(1)) self.assertEqual(self.s_F, self.f._get_output(2)) def test_get_in_iters(self): self.assertEqual(iter(self.s_A), self.f._get_in_iter(0)) self.assertEqual(iter(self.s_B), self.f._get_in_iter(1)) def test_get_out_iters(self): self.assertEqual(iter(self.s_D), self.f._get_out_iter(0)) self.assertEqual(iter(self.s_E), self.f._get_out_iter(1)) self.assertEqual(iter(self.s_F), self.f._get_out_iter(2)) def test_pop_data(self): self.assertEqual(1, self.f._pop_data(0)) self.assertEqual(3, self.f._pop_data(1)) def test_push_data(self): self.f._push_data(5, 0) self.assertEqual(5, self.s_D.__iter__().__next__()) def test_execute_outputs_closed(self): self.s_D.close() self.s_E.close() self.s_F.close() self.f._on_outputs_closed = MagicMock() self.f.execute() self.assertTrue(self.f._on_outputs_closed.called) def test_execute_on_data(self): self.f._on_data = MagicMock() self.f.execute() self.assertTrue( self.f._on_data.called) def test_execute_input_empty(self): self.s_A.clear() self.s_B.clear() self.f._on_inputs_empty = MagicMock() self.f.execute() self.assertTrue( self.f._on_inputs_empty.called) def test_execute_input_closed(self): self.s_A.clear() self.s_B.clear() self.s_A.close() self.s_B.close() self.f._on_inputs_closed = MagicMock() self.f.execute() self.assertTrue( self.f._on_inputs_closed.called) def test_default_on_inputs_closed_closes_outputs(self): self.s_A.clear() self.s_B.clear() self.s_A.close() self.s_B.close() self.f.execute() self.assertTrue( self.f._get_output(0).is_closed()) self.assertTrue( self.f._get_output(1).is_closed()) self.assertTrue( self.f._get_output(2).is_closed())
my-component.component.ts
import {Component} from 'angular2/core'; import {TestComponent} from './test.component'; import {OnInit} from 'angular2/core'; @Component({ selector: 'my-component', template: ` Hi, I'm <span [style.color]="inputElement.value === 'yes' ? 'red' : 'blue'">{{name}}</span> and this is my first angular2 component! <span [class.is-awesome]="inputElement.value === 'yes'">It is awesome!</span> <br> <br> Is it awesome? <input type="text" #inputElement (keyup)="0"> <br> <br> <button [disabled]='inputElement.value != "yes"'>Only enabled if 'yes' was entered</button> <test></test> `, styleUrls: ['src/css/mycomponent.css'], directives: [TestComponent] })
this.name = "Simon"; } }
export class MyComponentComponent implements OnInit { name: string; ngOnInit():any {
worker_api_direct.rs
/* Copyright 2019 Supercomputing Systems AG Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ pub extern crate alloc; use alloc::{ borrow::ToOwned, format, slice::{from_raw_parts, from_raw_parts_mut}, str, string::String, vec::Vec, }; use core::{ops::Deref, result::Result}; use sgx_types::*; use std::{ sync::atomic::{AtomicPtr, Ordering}, sync::{Arc, SgxMutex}, }; use sp_core::H256 as Hash; use codec::{Decode, Encode}; use log::*; use crate::rpc::{ api::SideChainApi, author::{Author, AuthorApi}, basic_pool::BasicPool, }; use crate::top_pool::pool::Options as PoolOptions; use jsonrpc_core::futures::executor; use jsonrpc_core::Error as RpcError; use jsonrpc_core::*; use serde_json::*; use substratee_stf::ShardIdentifier; use base58::FromBase58; use chain_relay::Block; use substratee_node_primitives::Request; use substratee_worker_primitives::RpcReturnValue; use substratee_worker_primitives::{DirectRequestStatus, TrustedOperationStatus}; use crate::rsa3072; use crate::utils::write_slice_and_whitespace_pad; static GLOBAL_TX_POOL: AtomicPtr<()> = AtomicPtr::new(0 as *mut ()); extern "C" { pub fn ocall_update_status_event( ret_val: *mut sgx_status_t, hash_encoded: *const u8, hash_size: u32, status_update_encoded: *const u8, status_size: u32, ) -> sgx_status_t; pub fn ocall_send_status( ret_val: *mut sgx_status_t, hash_encoded: *const u8, hash_size: u32, status_update_encoded: *const u8, status_size: u32, ) -> sgx_status_t; } #[no_mangle] // initialise tx pool and store within static atomic pointer pub unsafe extern "C" fn initialize_pool() -> sgx_status_t { let api = Arc::new(SideChainApi::new()); let tx_pool = BasicPool::create(PoolOptions::default(), api); let pool_ptr = Arc::new(SgxMutex::<BasicPool<SideChainApi<Block>, Block>>::new( tx_pool, )); let ptr = Arc::into_raw(pool_ptr); GLOBAL_TX_POOL.store(ptr as *mut (), Ordering::SeqCst); sgx_status_t::SGX_SUCCESS } pub fn load_top_pool() -> Option<&'static SgxMutex<BasicPool<SideChainApi<Block>, Block>>> { let ptr = GLOBAL_TX_POOL.load(Ordering::SeqCst) as *mut SgxMutex<BasicPool<SideChainApi<Block>, Block>>; if ptr.is_null() { None } else { Some(unsafe { &*ptr }) } }
    let mut method_string = String::new();
    for i in 0..vec_methods.len() {
        method_string.push_str(vec_methods[i]);
        if vec_methods.len() > (i + 1) {
            method_string.push_str(", ");
        }
    }
    format!("methods: [{}]", method_string)
}

// decodes a base58-encoded shard string into a ShardIdentifier
fn decode_shard_from_base58(shard_base58: String) -> Result<ShardIdentifier, String> {
    let shard_vec = match shard_base58.from_base58() {
        Ok(vec) => vec,
        Err(_) => return Err("Invalid base58 format of shard id".to_owned()),
    };
    let shard = match ShardIdentifier::decode(&mut shard_vec.as_slice()) {
        Ok(hash) => hash,
        Err(_) => return Err("Shard ID is not of type H256".to_owned()),
    };
    Ok(shard)
}

fn compute_encoded_return_error(error_msg: String) -> Vec<u8> {
    let return_value = RpcReturnValue {
        value: error_msg.encode(),
        do_watch: false,
        status: DirectRequestStatus::Error,
    };
    return_value.encode()
}

fn init_io_handler() -> IoHandler {
    let mut io = IoHandler::new();
    let mut rpc_methods_vec: Vec<&str> = Vec::new();

    // Add rpc methods
    // author_submitAndWatchExtrinsic
    let author_submit_and_watch_extrinsic_name: &str = "author_submitAndWatchExtrinsic";
    rpc_methods_vec.push(author_submit_and_watch_extrinsic_name);
    io.add_sync_method(
        author_submit_and_watch_extrinsic_name,
        move |params: Params| {
            match params.parse::<Vec<u8>>() {
                Ok(encoded_params) => {
                    // Acquire lock
                    let tx_pool_mutex = load_top_pool().unwrap();
                    let tx_pool_guard = tx_pool_mutex.lock().unwrap();
                    let tx_pool = Arc::new(tx_pool_guard.deref());
                    let author = Author::new(tx_pool);

                    match Request::decode(&mut encoded_params.as_slice()) {
                        Ok(request) => {
                            let shard: ShardIdentifier = request.shard;
                            let encrypted_trusted_call: Vec<u8> = request.cyphertext;
                            let result = async {
                                author
                                    .watch_top(encrypted_trusted_call.clone(), shard)
                                    .await
                            };
                            let response: Result<Hash, RpcError> = executor::block_on(result);
                            let json_value = match response {
                                Ok(hash_value) => RpcReturnValue {
                                    do_watch: true,
                                    value: hash_value.encode(),
                                    status: DirectRequestStatus::TrustedOperationStatus(
                                        TrustedOperationStatus::Submitted,
                                    ),
                                }
                                .encode(),
                                Err(rpc_error) => compute_encoded_return_error(rpc_error.message),
                            };
                            Ok(json!(json_value))
                        }
                        Err(_) => Ok(json!(compute_encoded_return_error(
                            "Could not decode request".to_owned()
                        ))),
                    }
                }
                Err(e) => {
                    let error_msg: String = format!("Could not submit trusted call due to: {}", e);
                    Ok(json!(compute_encoded_return_error(error_msg)))
                }
            }
        },
    );

    // author_submitExtrinsic
    let author_submit_extrinsic_name: &str = "author_submitExtrinsic";
    rpc_methods_vec.push(author_submit_extrinsic_name);
    io.add_sync_method(author_submit_extrinsic_name, move |params: Params| {
        match params.parse::<Vec<u8>>() {
            Ok(encoded_params) => {
                // Acquire lock
                let tx_pool_mutex = load_top_pool().unwrap();
                let tx_pool_guard = tx_pool_mutex.lock().unwrap();
                let tx_pool = Arc::new(tx_pool_guard.deref());
                let author = Author::new(tx_pool);

                match Request::decode(&mut encoded_params.as_slice()) {
                    Ok(request) => {
                        let shard: ShardIdentifier = request.shard;
                        let encrypted_trusted_op: Vec<u8> = request.cyphertext;
                        let result = async {
                            author.submit_top(encrypted_trusted_op.clone(), shard).await
                        };
                        let response: Result<Hash, RpcError> = executor::block_on(result);
                        let json_value = match response {
                            Ok(hash_value) => RpcReturnValue {
                                do_watch: false,
                                value: hash_value.encode(),
                                status: DirectRequestStatus::TrustedOperationStatus(
                                    TrustedOperationStatus::Submitted,
                                ),
                            }
                            .encode(),
                            Err(rpc_error) => compute_encoded_return_error(rpc_error.message),
                        };
                        Ok(json!(json_value))
                    }
                    Err(_) => Ok(json!(compute_encoded_return_error(
                        "Could not decode request".to_owned()
                    ))),
                }
            }
            Err(e) => {
                let error_msg: String = format!("Could not submit trusted call due to: {}", e);
                Ok(json!(compute_encoded_return_error(error_msg)))
            }
        }
    });

    // author_pendingExtrinsics
    let author_pending_extrinsic_name: &str = "author_pendingExtrinsics";
    rpc_methods_vec.push(author_pending_extrinsic_name);
    io.add_sync_method(author_pending_extrinsic_name, move |params: Params| {
        match params.parse::<Vec<String>>() {
            Ok(shards) => {
                // Acquire tx_pool lock
                let tx_pool_mutex = load_top_pool().unwrap();
                let tx_pool_guard = tx_pool_mutex.lock().unwrap();
                let tx_pool = Arc::new(tx_pool_guard.deref());
                let author = Author::new(tx_pool);

                let mut retrieved_operations = vec![];
                for shard_base58 in shards.iter() {
                    let shard = match decode_shard_from_base58(shard_base58.clone()) {
                        Ok(id) => id,
                        Err(msg) => return Ok(Value::String(msg)),
                    };
                    if let Ok(vec_of_operations) = author.pending_tops(shard) {
                        retrieved_operations.push(vec_of_operations);
                    }
                }
                let json_value = RpcReturnValue {
                    do_watch: false,
                    value: retrieved_operations.encode(),
                    status: DirectRequestStatus::Ok,
                };
                Ok(json!(json_value.encode()))
            }
            Err(e) => {
                let error_msg: String = format!("Could not retrieve pending calls due to: {}", e);
                Ok(json!(compute_encoded_return_error(error_msg)))
            }
        }
    });

    // author_getShieldingKey
    let rsa_pubkey_name: &str = "author_getShieldingKey";
    rpc_methods_vec.push(rsa_pubkey_name);
    io.add_sync_method(rsa_pubkey_name, move |_: Params| {
        let rsa_pubkey = match rsa3072::unseal_pubkey() {
            Ok(key) => key,
            Err(status) => {
                let error_msg: String = format!("Could not get rsa pubkey due to: {}", status);
                return Ok(json!(compute_encoded_return_error(error_msg)));
            }
        };

        let rsa_pubkey_json = match serde_json::to_string(&rsa_pubkey) {
            Ok(k) => k,
            Err(x) => {
                let error_msg: String = format!(
                    "[Enclave] can't serialize rsa_pubkey {:?} {}",
                    rsa_pubkey, x
                );
                return Ok(json!(compute_encoded_return_error(error_msg)));
            }
        };
        let json_value =
            RpcReturnValue::new(rsa_pubkey_json.encode(), false, DirectRequestStatus::Ok);
        Ok(json!(json_value.encode()))
    });

    // chain_subscribeAllHeads
    let chain_subscribe_all_heads_name: &str = "chain_subscribeAllHeads";
    rpc_methods_vec.push(chain_subscribe_all_heads_name);
    io.add_sync_method(chain_subscribe_all_heads_name, |_: Params| {
        let parsed = "world";
        Ok(Value::String(format!("hello, {}", parsed)))
    });

    // state_getMetadata
    let state_get_metadata_name: &str = "state_getMetadata";
    rpc_methods_vec.push(state_get_metadata_name);
    io.add_sync_method(state_get_metadata_name, |_: Params| {
        let parsed = "world";
        Ok(Value::String(format!("hello, {}", parsed)))
    });

    // state_getRuntimeVersion
    let state_get_runtime_version_name: &str = "state_getRuntimeVersion";
    rpc_methods_vec.push(state_get_runtime_version_name);
    io.add_sync_method(state_get_runtime_version_name, |_: Params| {
        let parsed = "world";
        Ok(Value::String(format!("hello, {}", parsed)))
    });

    // state_get
    let state_get_name: &str = "state_get";
    rpc_methods_vec.push(state_get_name);
    io.add_sync_method(state_get_name, |_: Params| {
        let parsed = "world";
        Ok(Value::String(format!("hello, {}", parsed)))
    });

    // system_health
    let state_health_name: &str = "system_health";
    rpc_methods_vec.push(state_health_name);
    io.add_sync_method(state_health_name, |_: Params| {
        let parsed = "world";
        Ok(Value::String(format!("hello, {}", parsed)))
    });

    // system_name
    let state_name_name: &str = "system_name";
    rpc_methods_vec.push(state_name_name);
    io.add_sync_method(state_name_name, |_: Params| {
        let parsed = "world";
        Ok(Value::String(format!("hello, {}", parsed)))
    });

    // system_version
    let state_version_name: &str = "system_version";
    rpc_methods_vec.push(state_version_name);
    io.add_sync_method(state_version_name, |_: Params| {
        let parsed = "world";
        Ok(Value::String(format!("hello, {}", parsed)))
    });

    // returns all rpc methods
    let rpc_methods_string: String = convert_vec_to_string(rpc_methods_vec);
    io.add_sync_method("rpc_methods", move |_: Params| {
        Ok(Value::String(rpc_methods_string.to_owned()))
    });

    io
}

#[no_mangle]
pub unsafe extern "C" fn call_rpc_methods(
    request: *const u8,
    request_len: u32,
    response: *mut u8,
    response_len: u32,
) -> sgx_status_t {
    // init
    let io = init_io_handler();

    // get request string
    let req: Vec<u8> = from_raw_parts(request, request_len as usize).to_vec();
    let request_string = match str::from_utf8(&req) {
        Ok(req) => req,
        Err(e) => {
            error!("Decoding Header failed. Error: {:?}", e);
            return sgx_status_t::SGX_ERROR_UNEXPECTED;
        }
    };
    // Rpc Response String
    let response_string = io.handle_request_sync(request_string).unwrap();

    // update response outside of enclave
    let response_slice = from_raw_parts_mut(response, response_len as usize);
    write_slice_and_whitespace_pad(response_slice, response_string.as_bytes().to_vec());
    sgx_status_t::SGX_SUCCESS
}

pub fn update_status_event<H: Encode>(
    hash: H,
    status_update: TrustedOperationStatus,
) -> Result<(), ()> {
    let mut rt: sgx_status_t = sgx_status_t::SGX_ERROR_UNEXPECTED;

    let hash_encoded = hash.encode();
    let status_update_encoded = status_update.encode();

    let res = unsafe {
        ocall_update_status_event(
            &mut rt as *mut sgx_status_t,
            hash_encoded.as_ptr(),
            hash_encoded.len() as u32,
            status_update_encoded.as_ptr(),
            status_update_encoded.len() as u32,
        )
    };

    if rt != sgx_status_t::SGX_SUCCESS {
        return Err(());
    }

    if res != sgx_status_t::SGX_SUCCESS {
        return Err(());
    }

    Ok(())
}

pub fn send_state<H: Encode>(hash: H, value_opt: Option<Vec<u8>>) -> Result<(), ()> {
    let mut rt: sgx_status_t = sgx_status_t::SGX_ERROR_UNEXPECTED;

    let hash_encoded = hash.encode();
    let value_encoded = value_opt.encode();

    let res = unsafe {
        ocall_send_status(
            &mut rt as *mut sgx_status_t,
            hash_encoded.as_ptr(),
            hash_encoded.len() as u32,
            value_encoded.as_ptr(),
            value_encoded.len() as u32,
        )
    };

    if rt != sgx_status_t::SGX_SUCCESS {
        return Err(());
    }

    if res != sgx_status_t::SGX_SUCCESS {
        return Err(());
    }

    Ok(())
}
// converts the rpc methods vector to a string and adds commas and brackets for readability fn convert_vec_to_string(vec_methods: Vec<&str>) -> String {
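As a brief orientation (an assumption drawn from the use of jsonrpc_core above, not stated in the source), `call_rpc_methods` hands the raw request bytes to a standard JSON-RPC 2.0 `IoHandler`, so a plain method-discovery call would look like:

// Hypothetical request string passed (as UTF-8 bytes) into `call_rpc_methods`;
// the id value is arbitrary:
// {"jsonrpc": "2.0", "method": "rpc_methods", "params": [], "id": 1}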
can_partition_k_subsets.rs
/*
Given an integer array nums and a positive integer k, determine whether it is
possible to divide this array into k non-empty subsets whose sums are all equal.

Example 1:
Input: nums = [4, 3, 2, 3, 5, 2, 1], k = 4
Output: True
Explanation: it is possible to divide it into 4 subsets (5), (1, 4), (2, 3), (2, 3) with equal sums.

Constraints:
1 <= k <= len(nums) <= 16
0 < nums[i] < 10000

Source: LeetCode (力扣)
Link: https://leetcode-cn.com/problems/partition-to-k-equal-sum-subsets
The copyright belongs to LeetCode. For commercial reprints please contact them
for authorization; for non-commercial reprints please cite the source.
*/
/*
Backtracking.
Not accepted: exceeds the time limit.
*/
pub fn can_partition_k_subsets(nums: Vec<i32>, k: i32) -> bool {
    // Compute the sum each subset must reach.
    let mut sum = 0;
    // Gotcha: iterating over `nums` by value here would move its ownership,
    // making `nums` unusable afterwards, so iterate over a reference instead.
    for x in &nums {
        sum += x;
    }
    if sum % k != 0 {
        return false;
    }
    let mut path = vec![0; k as usize];
    return backtrack(&mut path, 0, nums.len(), &nums, sum / k, k);
}

fn backtrack(path: &mut Vec<i32>, idx: usize, len: usize, nums: &Vec<i32>, sum: i32, k: i32) -> bool {
    if idx == len {
        return true;
    }
    // Try placing the current number into each subset.
    for i in 0..k as usize {
        if path[i] + nums[idx] <= sum {
            path[i] += nums[idx];
            let res = backtrack(path, idx + 1, len, nums, sum, k);
            if res {
                return true;
            }
            // Backtrack.
            path[i] -= nums[idx];
        }
        // Optimization 1: if the current bucket is still empty, trying the
        // remaining (equally empty) buckets is pointless, so stop here.
        if path[i] == 0 {
            break;
        }
    }
    // Every placement was tried without success, so return false.
    return false;
}

#[test]
fn test() {
    let nums = vec![4, 3, 2, 3, 5, 2, 1];
    let res = can_partition_k_subsets(nums, 4);
    println!("res:{}", res);
    assert!(res);
}
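The header comment above notes that this backtracking exceeds LeetCode's time limit. A common remedy, sketched below as an assumption rather than part of the original solution, is to sort the numbers in descending order first so oversized placements fail early, then reuse the same `backtrack`:

/*
Sketch of the usual speed-up (not in the original submission): placing the
largest numbers first prunes dead branches of the search tree immediately.
*/
pub fn can_partition_k_subsets_fast(mut nums: Vec<i32>, k: i32) -> bool {
    let total: i32 = nums.iter().sum();
    if total % k != 0 {
        return false;
    }
    let target = total / k;
    // Sort descending so big numbers are placed (and rejected) first.
    nums.sort_unstable_by(|a, b| b.cmp(a));
    if nums.first().map_or(false, |&x| x > target) {
        return false; // a single number larger than the target can never fit
    }
    let mut path = vec![0; k as usize];
    backtrack(&mut path, 0, nums.len(), &nums, target, k)
}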
cloudfront_distribution_test.go
package aws_test import ( "testing" "github.com/infracost/infracost/internal/testutil" "github.com/infracost/infracost/internal/providers/terraform/tftest" ) func
(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode") } tf := ` resource "aws_s3_bucket" "b" { bucket = "mybucket" acl = "private" tags = { Name = "My bucket" } } locals { s3_origin_id = "myS3Origin" } resource "aws_cloudfront_distribution" "s3_distribution" { origin { domain_name = aws_s3_bucket.b.bucket_regional_domain_name origin_id = local.s3_origin_id s3_origin_config { origin_access_identity = "origin-access-identity/cloudfront/ABCDEFG1234567" } } enabled = true is_ipv6_enabled = true comment = "Some comment" default_root_object = "index.html" logging_config { include_cookies = false bucket = "mylogs.s3.amazonaws.com" prefix = "myprefix" } aliases = ["mysite.example.com", "yoursite.example.com"] default_cache_behavior { allowed_methods = ["DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"] cached_methods = ["GET", "HEAD"] target_origin_id = local.s3_origin_id forwarded_values { query_string = false cookies { forward = "none" } } viewer_protocol_policy = "allow-all" min_ttl = 0 default_ttl = 3600 max_ttl = 86400 } # Cache behavior with precedence 0 ordered_cache_behavior { path_pattern = "/content/immutable/*" allowed_methods = ["GET", "HEAD", "OPTIONS"] cached_methods = ["GET", "HEAD", "OPTIONS"] target_origin_id = local.s3_origin_id forwarded_values { query_string = false headers = ["Origin"] cookies { forward = "none" } } min_ttl = 0 default_ttl = 86400 max_ttl = 31536000 compress = true viewer_protocol_policy = "redirect-to-https" } # Cache behavior with precedence 1 ordered_cache_behavior { path_pattern = "/content/*" allowed_methods = ["GET", "HEAD", "OPTIONS"] cached_methods = ["GET", "HEAD"] target_origin_id = local.s3_origin_id forwarded_values { query_string = false cookies { forward = "none" } } min_ttl = 0 default_ttl = 3600 max_ttl = 86400 compress = true viewer_protocol_policy = "redirect-to-https" } price_class = "PriceClass_200" restrictions { geo_restriction { restriction_type = "whitelist" locations = ["US", "CA", "GB", "DE"] } } tags = { Environment = "production" } viewer_certificate { cloudfront_default_certificate = true } } ` resourceChecks := []testutil.ResourceCheck{ { Name: "aws_cloudfront_distribution.s3_distribution", CostComponentChecks: []testutil.CostComponentCheck{ { Name: "Field level encryption requests", PriceHash: "23b94d89fdbc6e2e4ba62367419e8b3d-4a9dfd3965ffcbab75845ead7a27fd47", }, { Name: "Real-time log requests", PriceHash: "d2263008404d6c3cfe3f3ad047842cea-361e966330f27dcb2d64319ce0c579cf", }, { Name: "Dedicated IP custom SSLs", PriceHash: "e15ddcbddbedf5da838718e496f3f9de-a9191d0a7972a4ac9c0e44b9ea6310bb", }, { Name: "Invalidation requests (first 1k)", PriceHash: "a38b0d76c23fe5c7e80d44fe2950d154-a71f166085a0bf987715473b95588268", }, }, SubResourceChecks: []testutil.ResourceCheck{ { Name: "Data transfer out to internet", CostComponentChecks: []testutil.CostComponentCheck{ { Name: "US, Mexico, Canada (first 10TB)", PriceHash: "99df20efc8b58ceb7813f795a75772be-b1ae3861dc57e2db217fa83a7420374f", }, { Name: "Europe, Israel (first 10TB)", PriceHash: "d0e5286d1ab64579ef1a32ad9c6b0d23-b1ae3861dc57e2db217fa83a7420374f", }, { Name: "South Africa, Kenya, Middle East (first 10TB)", PriceHash: "8867695c7ff0b60dc0ead9aaa49e0b78-b1ae3861dc57e2db217fa83a7420374f", }, { Name: "South America (first 10TB)", PriceHash: "24a65fd18a4ff0cbdd8c00be1ca8e8b2-b1ae3861dc57e2db217fa83a7420374f", }, { Name: "Japan (first 10TB)", PriceHash: "25895b95f4d37a1941ab6f1f6f1bee7e-b1ae3861dc57e2db217fa83a7420374f", }, { Name: "Australia, New Zealand 
(first 10TB)", PriceHash: "f22352efe593321e3c184abb089b6bc4-b1ae3861dc57e2db217fa83a7420374f", }, { Name: "Hong Kong, Philippines, Asia Pacific (first 10TB)", PriceHash: "cfc8f70af2243c498cb6a86a75e61ecf-b1ae3861dc57e2db217fa83a7420374f", }, { Name: "India (first 10TB)", PriceHash: "33e8f28eace821ff2d942d9d36be1847-b1ae3861dc57e2db217fa83a7420374f", }, }, }, { Name: "Data transfer out to origin", CostComponentChecks: []testutil.CostComponentCheck{ { Name: "US, Mexico, Canada", PriceHash: "0c8dbb9a1aad0159dba32a7dcd48b384-b1ae3861dc57e2db217fa83a7420374f", }, { Name: "Europe, Israel", PriceHash: "afb13cd55f419b70212c5767ff502944-b1ae3861dc57e2db217fa83a7420374f", }, { Name: "South Africa, Kenya, Middle East", PriceHash: "7cbab97f2b54211d7654b0e4ba3f3c70-b1ae3861dc57e2db217fa83a7420374f", }, { Name: "South America", PriceHash: "5cc794b11c9e61704a9dfdeaa95721d6-b1ae3861dc57e2db217fa83a7420374f", }, { Name: "Japan", PriceHash: "5456abd68dfb61de5a60286196e52205-b1ae3861dc57e2db217fa83a7420374f", }, { Name: "Australia, New Zealand", PriceHash: "80125f460392b4b600eb5954de37e913-b1ae3861dc57e2db217fa83a7420374f", }, { Name: "Hong Kong, Philippines, Asia Pacific", PriceHash: "63a411ecbb6d084a9e9c15b49c4a3ec9-b1ae3861dc57e2db217fa83a7420374f", }, { Name: "India", PriceHash: "74d31f8195b5487364d2ae10b0b144c4-b1ae3861dc57e2db217fa83a7420374f", }, }, }, { Name: "HTTP requests", CostComponentChecks: []testutil.CostComponentCheck{ { Name: "US, Mexico, Canada", PriceHash: "6e7bb9693c7bdc3c1b09a5ad0cd11a4a-4a9dfd3965ffcbab75845ead7a27fd47", }, { Name: "Europe, Israel", PriceHash: "f81d8aa74fae2d32a4149a85920f3255-4a9dfd3965ffcbab75845ead7a27fd47", }, { Name: "South Africa, Kenya, Middle East", PriceHash: "c64d2813fa3777ace1a1006389217239-4a9dfd3965ffcbab75845ead7a27fd47", }, { Name: "South America", PriceHash: "f0243692bd53ed2cef6ed6445b0c5683-4a9dfd3965ffcbab75845ead7a27fd47", }, { Name: "Japan", PriceHash: "681d410b9400be8fb5e7e2d1b089d159-4a9dfd3965ffcbab75845ead7a27fd47", }, { Name: "Australia, New Zealand", PriceHash: "4e86dc6c95675a4c8dd4ac876a30ab3c-4a9dfd3965ffcbab75845ead7a27fd47", }, { Name: "Hong Kong, Philippines, Asia Pacific", PriceHash: "871d73c17fc8c93de0ccdbc2c9c470d7-4a9dfd3965ffcbab75845ead7a27fd47", }, { Name: "India", PriceHash: "2632f4cda76bc34285fb6cd5fb894ee4-4a9dfd3965ffcbab75845ead7a27fd47", }, }, }, { Name: "HTTPS requests", CostComponentChecks: []testutil.CostComponentCheck{ { Name: "US, Mexico, Canada", PriceHash: "8890fabb60883960c9178fe0e753e47e-4a9dfd3965ffcbab75845ead7a27fd47", }, { Name: "Europe, Israel", PriceHash: "63c72b02594fc500d149b54e4248e38b-4a9dfd3965ffcbab75845ead7a27fd47", }, { Name: "South Africa, Kenya, Middle East", PriceHash: "a1527c0b56940465cf2a5eabf97e45f0-4a9dfd3965ffcbab75845ead7a27fd47", }, { Name: "South America", PriceHash: "3388ba97d6c8373e5c6de6ff51b431af-4a9dfd3965ffcbab75845ead7a27fd47", }, { Name: "Japan", PriceHash: "3f75cf910bfbe3e47bbff04ed01e3986-4a9dfd3965ffcbab75845ead7a27fd47", }, { Name: "Australia, New Zealand", PriceHash: "358f87101e7deff58a09cc76e1de7bd3-4a9dfd3965ffcbab75845ead7a27fd47", }, { Name: "Hong Kong, Philippines, Asia Pacific", PriceHash: "1931ee7f0715a77116c6c4a7e1eecf49-4a9dfd3965ffcbab75845ead7a27fd47", }, { Name: "India", PriceHash: "0a703a33e830797459e6a0226336bb19-4a9dfd3965ffcbab75845ead7a27fd47", }, }, }, { Name: "Origin shield HTTP requests", CostComponentChecks: []testutil.CostComponentCheck{ { Name: "US", PriceHash: "9a59a3308256aab9256b6a421fd072d9-4a9dfd3965ffcbab75845ead7a27fd47", }, { Name: "Europe", 
PriceHash: "43f5e56d0b879abe92fc71f280d995fc-4a9dfd3965ffcbab75845ead7a27fd47", }, { Name: "South America", PriceHash: "224f2fff366333b0e6dfeb454010be9f-4a9dfd3965ffcbab75845ead7a27fd47", }, { Name: "Japan", PriceHash: "1169ba622705234fd01b29ed53173f2d-4a9dfd3965ffcbab75845ead7a27fd47", }, { Name: "Australia", PriceHash: "57674bc88879a321596331ff12c624fa-4a9dfd3965ffcbab75845ead7a27fd47", }, { Name: "Singapore", PriceHash: "57e69a82635268b50499099c6311b694-4a9dfd3965ffcbab75845ead7a27fd47", }, { Name: "South Korea", PriceHash: "f1f36dcbd00e0b5a78dd8134b1314350-4a9dfd3965ffcbab75845ead7a27fd47", }, { Name: "India", PriceHash: "dce9a91d009b3e40ab41d992d6009779-4a9dfd3965ffcbab75845ead7a27fd47", }, }, }, }, }, { Name: "aws_s3_bucket.b", SkipCheck: true, }, } tftest.ResourceTests(t, tf, resourceChecks) }
TestCloudfrontDistribution
actor_test.go
package actor_test import ( "testing" "time" "github.com/dc0d/club/actor" "github.com/stretchr/testify/assert" ) var (
period = actor.Period ) func Test01(t *testing.T) { assert := assert.New(t) errors := make(chan error, 10) actor.Start(func(mailbox <-chan interface{}) error { panic(10) }, mailboxSize(10), period(time.Millisecond*10), numberOfRetries(3), onError(func(e error) { errors <- e })) var c int FOR01: for i := 1; i <= 10; i++ { select { case <-errors: case <-time.After(time.Millisecond * 100): break FOR01 } c++ } assert.Equal(3, c) }
mailboxSize = actor.MailboxSize numberOfRetries = actor.NumberOfRetries onError = actor.OnError
asm.go
// Do not edit. Bootstrap copy of /Volumes/Android/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ppc64/asm.go //line /Volumes/Android/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ppc64/asm.go:1 // Inferno utils/5l/asm.c // http://code.google.com/p/inferno-os/source/browse/utils/5l/asm.c // // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. // Portions Copyright © 1995-1997 C H Forsyth ([email protected]) // Portions Copyright © 1997-1999 Vita Nuova Limited // Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) // Portions Copyright © 2004,2006 Bruce Ellis // Portions Copyright © 2005-2007 C H Forsyth ([email protected]) // Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others // Portions Copyright © 2009 The Go Authors. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package ppc64 import ( "bootstrap/internal/obj" "bootstrap/link/internal/ld" "encoding/binary" "fmt" "log" ) func gentext() { var s *ld.LSym var stub *ld.LSym var pprevtextp **ld.LSym var r *ld.Reloc var n string var o1 uint32 var i int // The ppc64 ABI PLT has similar concepts to other // architectures, but is laid out quite differently. When we // see an R_PPC64_REL24 relocation to a dynamic symbol // (indicating that the call needs to go through the PLT), we // generate up to three stubs and reserve a PLT slot. // // 1) The call site will be bl x; nop (where the relocation // applies to the bl). We rewrite this to bl x_stub; ld // r2,24(r1). The ld is necessary because x_stub will save // r2 (the TOC pointer) at 24(r1) (the "TOC save slot"). // // 2) We reserve space for a pointer in the .plt section (once // per referenced dynamic function). .plt is a data // section filled solely by the dynamic linker (more like // .plt.got on other architectures). Initially, the // dynamic linker will fill each slot with a pointer to the // corresponding x@plt entry point. // // 3) We generate the "call stub" x_stub (once per dynamic // function/object file pair). This saves the TOC in the // TOC save slot, reads the function pointer from x's .plt // slot and calls it like any other global entry point // (including setting r12 to the function address). // // 4) We generate the "symbol resolver stub" x@plt (once per // dynamic function). This is solely a branch to the glink // resolver stub. // // 5) We generate the glink resolver stub (only once). 
This // computes which symbol resolver stub we came through and // invokes the dynamic resolver via a pointer provided by // the dynamic linker. This will patch up the .plt slot to // point directly at the function so future calls go // straight from the call stub to the real function, and // then call the function. // NOTE: It's possible we could make ppc64 closer to other // architectures: ppc64's .plt is like .plt.got on other // platforms and ppc64's .glink is like .plt on other // platforms. // Find all R_PPC64_REL24 relocations that reference dynamic // imports. Reserve PLT entries for these symbols and // generate call stubs. The call stubs need to live in .text, // which is why we need to do this pass this early. // // This assumes "case 1" from the ABI, where the caller needs // us to save and restore the TOC pointer. pprevtextp = &ld.Ctxt.Textp for s = *pprevtextp; s != nil; pprevtextp, s = &s.Next, s.Next { for i = range s.R { r = &s.R[i] if r.Type != 256+ld.R_PPC64_REL24 || r.Sym.Type != obj.SDYNIMPORT { continue } // Reserve PLT entry and generate symbol // resolver addpltsym(ld.Ctxt, r.Sym) // Generate call stub n = fmt.Sprintf("%s.%s", s.Name, r.Sym.Name) stub = ld.Linklookup(ld.Ctxt, n, 0) stub.Reachable = stub.Reachable || s.Reachable if stub.Size == 0 { // Need outer to resolve .TOC. stub.Outer = s // Link in to textp before s (we could // do it after, but would have to skip // the subsymbols) *pprevtextp = stub stub.Next = s pprevtextp = &stub.Next gencallstub(1, stub, r.Sym) } // Update the relocation to use the call stub r.Sym = stub // Restore TOC after bl. The compiler put a // nop here for us to overwrite. o1 = 0xe8410018 // ld r2,24(r1) ld.Ctxt.Arch.ByteOrder.PutUint32(s.P[r.Off+4:], o1) } } } // Construct a call stub in stub that calls symbol targ via its PLT // entry. func gencallstub(abicase int, stub *ld.LSym, targ *ld.LSym) { if abicase != 1 { // If we see R_PPC64_TOCSAVE or R_PPC64_REL24_NOTOC // relocations, we'll need to implement cases 2 and 3. log.Fatalf("gencallstub only implements case 1 calls") } plt := ld.Linklookup(ld.Ctxt, ".plt", 0) stub.Type = obj.STEXT // Save TOC pointer in TOC save slot ld.Adduint32(ld.Ctxt, stub, 0xf8410018) // std r2,24(r1) // Load the function pointer from the PLT. r := ld.Addrel(stub) r.Off = int32(stub.Size) r.Sym = plt r.Add = int64(targ.Plt) r.Siz = 2 if ld.Ctxt.Arch.ByteOrder == binary.BigEndian { r.Off += int32(r.Siz) } r.Type = obj.R_POWER_TOC r.Variant = ld.RV_POWER_HA ld.Adduint32(ld.Ctxt, stub, 0x3d820000) // addis r12,r2,targ@plt@toc@ha r = ld.Addrel(stub) r.Off = int32(stub.Size) r.Sym = plt r.Add = int64(targ.Plt) r.Siz = 2 if ld.Ctxt.Arch.ByteOrder == binary.BigEndian { r.Off += int32(r.Siz) } r.Type = obj.R_POWER_TOC r.Variant = ld.RV_POWER_LO ld.Adduint32(ld.Ctxt, stub, 0xe98c0000) // ld r12,targ@plt@toc@l(r12) // Jump to the loaded pointer ld.Adduint32(ld.Ctxt, stub, 0x7d8903a6) // mtctr r12 ld.Adduint32(ld.Ctxt, stub, 0x4e800420) // bctr } func adddynrela(rel *ld.LSym, s *ld.LSym, r *ld.Reloc) { log.Fatalf("adddynrela not implemented") } func adddynre
Sym, r *ld.Reloc) { targ := r.Sym ld.Ctxt.Cursym = s switch r.Type { default: if r.Type >= 256 { ld.Diag("unexpected relocation type %d", r.Type) return } // Handle relocations found in ELF object files. case 256 + ld.R_PPC64_REL24: r.Type = obj.R_CALLPOWER // This is a local call, so the caller isn't setting // up r12 and r2 is the same for the caller and // callee. Hence, we need to go to the local entry // point. (If we don't do this, the callee will try // to use r12 to compute r2.) r.Add += int64(r.Sym.Localentry) * 4 if targ.Type == obj.SDYNIMPORT { // Should have been handled in elfsetupplt ld.Diag("unexpected R_PPC64_REL24 for dyn import") } return case 256 + ld.R_PPC64_ADDR64: r.Type = obj.R_ADDR if targ.Type == obj.SDYNIMPORT { // These happen in .toc sections ld.Adddynsym(ld.Ctxt, targ) rela := ld.Linklookup(ld.Ctxt, ".rela", 0) ld.Addaddrplus(ld.Ctxt, rela, s, int64(r.Off)) ld.Adduint64(ld.Ctxt, rela, ld.ELF64_R_INFO(uint32(targ.Dynid), ld.R_PPC64_ADDR64)) ld.Adduint64(ld.Ctxt, rela, uint64(r.Add)) r.Type = 256 // ignore during relocsym } return case 256 + ld.R_PPC64_TOC16: r.Type = obj.R_POWER_TOC r.Variant = ld.RV_POWER_LO | ld.RV_CHECK_OVERFLOW return case 256 + ld.R_PPC64_TOC16_LO: r.Type = obj.R_POWER_TOC r.Variant = ld.RV_POWER_LO return case 256 + ld.R_PPC64_TOC16_HA: r.Type = obj.R_POWER_TOC r.Variant = ld.RV_POWER_HA | ld.RV_CHECK_OVERFLOW return case 256 + ld.R_PPC64_TOC16_HI: r.Type = obj.R_POWER_TOC r.Variant = ld.RV_POWER_HI | ld.RV_CHECK_OVERFLOW return case 256 + ld.R_PPC64_TOC16_DS: r.Type = obj.R_POWER_TOC r.Variant = ld.RV_POWER_DS | ld.RV_CHECK_OVERFLOW return case 256 + ld.R_PPC64_TOC16_LO_DS: r.Type = obj.R_POWER_TOC r.Variant = ld.RV_POWER_DS return case 256 + ld.R_PPC64_REL16_LO: r.Type = obj.R_PCREL r.Variant = ld.RV_POWER_LO r.Add += 2 // Compensate for relocation size of 2 return case 256 + ld.R_PPC64_REL16_HI: r.Type = obj.R_PCREL r.Variant = ld.RV_POWER_HI | ld.RV_CHECK_OVERFLOW r.Add += 2 return case 256 + ld.R_PPC64_REL16_HA: r.Type = obj.R_PCREL r.Variant = ld.RV_POWER_HA | ld.RV_CHECK_OVERFLOW r.Add += 2 return } // Handle references to ELF symbols from our own object files. if targ.Type != obj.SDYNIMPORT { return } // TODO(austin): Translate our relocations to ELF ld.Diag("unsupported relocation for dynamic symbol %s (type=%d stype=%d)", targ.Name, r.Type, targ.Type) } func elfreloc1(r *ld.Reloc, sectoff int64) int { // TODO(minux) return -1 } func elfsetupplt() { plt := ld.Linklookup(ld.Ctxt, ".plt", 0) if plt.Size == 0 { // The dynamic linker stores the address of the // dynamic resolver and the DSO identifier in the two // doublewords at the beginning of the .plt section // before the PLT array. Reserve space for these. plt.Size = 16 } } func machoreloc1(r *ld.Reloc, sectoff int64) int { return -1 } // Return the value of .TOC. for symbol s func symtoc(s *ld.LSym) int64 { var toc *ld.LSym if s.Outer != nil { toc = ld.Linkrlookup(ld.Ctxt, ".TOC.", int(s.Outer.Version)) } else { toc = ld.Linkrlookup(ld.Ctxt, ".TOC.", int(s.Version)) } if toc == nil { ld.Diag("TOC-relative relocation in object without .TOC.") return 0 } return toc.Value } func archreloc(r *ld.Reloc, s *ld.LSym, val *int64) int { if ld.Linkmode == ld.LinkExternal { // TODO(minux): translate R_ADDRPOWER and R_CALLPOWER into standard ELF relocations. // R_ADDRPOWER corresponds to R_PPC_ADDR16_HA and R_PPC_ADDR16_LO. // R_CALLPOWER corresponds to R_PPC_REL24. 
return -1 } switch r.Type { case obj.R_CONST: *val = r.Add return 0 case obj.R_GOTOFF: *val = ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ld.Linklookup(ld.Ctxt, ".got", 0)) return 0 case obj.R_ADDRPOWER: // r->add is two ppc64 instructions holding an immediate 32-bit constant. // We want to add r->sym's address to that constant. // The encoding of the immediate x<<16 + y, // where x is the low 16 bits of the first instruction and y is the low 16 // bits of the second. Both x and y are signed (int16, not uint16). o1 := uint32(r.Add >> 32) o2 := uint32(r.Add) t := ld.Symaddr(r.Sym) if t < 0 { ld.Ctxt.Diag("relocation for %s is too big (>=2G): %d", s.Name, ld.Symaddr(r.Sym)) } t += int64((o1&0xffff)<<16 + uint32(int32(o2)<<16>>16)) if t&0x8000 != 0 { t += 0x10000 } o1 = o1&0xffff0000 | (uint32(t)>>16)&0xffff o2 = o2&0xffff0000 | uint32(t)&0xffff // when laid out, the instruction order must always be o1, o2. if ld.Ctxt.Arch.ByteOrder == binary.BigEndian { *val = int64(o1)<<32 | int64(o2) } else { *val = int64(o2)<<32 | int64(o1) } return 0 case obj.R_CALLPOWER: // Bits 6 through 29 = (S + A - P) >> 2 var o1 uint32 if ld.Ctxt.Arch.ByteOrder == binary.BigEndian { o1 = ld.Be32(s.P[r.Off:]) } else { o1 = ld.Le32(s.P[r.Off:]) } t := ld.Symaddr(r.Sym) + r.Add - (s.Value + int64(r.Off)) if t&3 != 0 { ld.Ctxt.Diag("relocation for %s+%d is not aligned: %d", r.Sym.Name, r.Off, t) } if int64(int32(t<<6)>>6) != t { // TODO(austin) This can happen if text > 32M. // Add a call trampoline to .text in that case. ld.Ctxt.Diag("relocation for %s+%d is too big: %d", r.Sym.Name, r.Off, t) } *val = int64(o1&0xfc000003 | uint32(t)&^0xfc000003) return 0 case obj.R_POWER_TOC: // S + A - .TOC. *val = ld.Symaddr(r.Sym) + r.Add - symtoc(s) return 0 } return -1 } func archrelocvariant(r *ld.Reloc, s *ld.LSym, t int64) int64 { switch r.Variant & ld.RV_TYPE_MASK { default: ld.Diag("unexpected relocation variant %d", r.Variant) fallthrough case ld.RV_NONE: return t case ld.RV_POWER_LO: if r.Variant&ld.RV_CHECK_OVERFLOW != 0 { // Whether to check for signed or unsigned // overflow depends on the instruction var o1 uint32 if ld.Ctxt.Arch.ByteOrder == binary.BigEndian { o1 = ld.Be32(s.P[r.Off-2:]) } else { o1 = ld.Le32(s.P[r.Off:]) } switch o1 >> 26 { case 24, // ori 26, // xori 28: // andi if t>>16 != 0 { goto overflow } default: if int64(int16(t)) != t { goto overflow } } } return int64(int16(t)) case ld.RV_POWER_HA: t += 0x8000 fallthrough // Fallthrough case ld.RV_POWER_HI: t >>= 16 if r.Variant&ld.RV_CHECK_OVERFLOW != 0 { // Whether to check for signed or unsigned // overflow depends on the instruction var o1 uint32 if ld.Ctxt.Arch.ByteOrder == binary.BigEndian { o1 = ld.Be32(s.P[r.Off-2:]) } else { o1 = ld.Le32(s.P[r.Off:]) } switch o1 >> 26 { case 25, // oris 27, // xoris 29: // andis if t>>16 != 0 { goto overflow } default: if int64(int16(t)) != t { goto overflow } } } return int64(int16(t)) case ld.RV_POWER_DS: var o1 uint32 if ld.Ctxt.Arch.ByteOrder == binary.BigEndian { o1 = uint32(ld.Be16(s.P[r.Off:])) } else { o1 = uint32(ld.Le16(s.P[r.Off:])) } if t&3 != 0 { ld.Diag("relocation for %s+%d is not aligned: %d", r.Sym.Name, r.Off, t) } if (r.Variant&ld.RV_CHECK_OVERFLOW != 0) && int64(int16(t)) != t { goto overflow } return int64(o1)&0x3 | int64(int16(t)) } overflow: ld.Diag("relocation for %s+%d is too big: %d", r.Sym.Name, r.Off, t) return t } func addpltsym(ctxt *ld.Link, s *ld.LSym) { if s.Plt >= 0 { return } ld.Adddynsym(ctxt, s) if ld.Iself { plt := ld.Linklookup(ctxt, ".plt", 0) rela := ld.Linklookup(ctxt, 
".rela.plt", 0) if plt.Size == 0 { elfsetupplt() } // Create the glink resolver if necessary glink := ensureglinkresolver() // Write symbol resolver stub (just a branch to the // glink resolver stub) r := ld.Addrel(glink) r.Sym = glink r.Off = int32(glink.Size) r.Siz = 4 r.Type = obj.R_CALLPOWER ld.Adduint32(ctxt, glink, 0x48000000) // b .glink // In the ppc64 ABI, the dynamic linker is responsible // for writing the entire PLT. We just need to // reserve 8 bytes for each PLT entry and generate a // JMP_SLOT dynamic relocation for it. // // TODO(austin): ABI v1 is different s.Plt = int32(plt.Size) plt.Size += 8 ld.Addaddrplus(ctxt, rela, plt, int64(s.Plt)) ld.Adduint64(ctxt, rela, ld.ELF64_R_INFO(uint32(s.Dynid), ld.R_PPC64_JMP_SLOT)) ld.Adduint64(ctxt, rela, 0) } else { ld.Diag("addpltsym: unsupported binary format") } } // Generate the glink resolver stub if necessary and return the .glink section func ensureglinkresolver() *ld.LSym { glink := ld.Linklookup(ld.Ctxt, ".glink", 0) if glink.Size != 0 { return glink } // This is essentially the resolver from the ppc64 ELF ABI. // At entry, r12 holds the address of the symbol resolver stub // for the target routine and the argument registers hold the // arguments for the target routine. // // This stub is PIC, so first get the PC of label 1 into r11. // Other things will be relative to this. ld.Adduint32(ld.Ctxt, glink, 0x7c0802a6) // mflr r0 ld.Adduint32(ld.Ctxt, glink, 0x429f0005) // bcl 20,31,1f ld.Adduint32(ld.Ctxt, glink, 0x7d6802a6) // 1: mflr r11 ld.Adduint32(ld.Ctxt, glink, 0x7c0803a6) // mtlf r0 // Compute the .plt array index from the entry point address. // Because this is PIC, everything is relative to label 1b (in // r11): // r0 = ((r12 - r11) - (res_0 - r11)) / 4 = (r12 - res_0) / 4 ld.Adduint32(ld.Ctxt, glink, 0x3800ffd0) // li r0,-(res_0-1b)=-48 ld.Adduint32(ld.Ctxt, glink, 0x7c006214) // add r0,r0,r12 ld.Adduint32(ld.Ctxt, glink, 0x7c0b0050) // sub r0,r0,r11 ld.Adduint32(ld.Ctxt, glink, 0x7800f082) // srdi r0,r0,2 // r11 = address of the first byte of the PLT r := ld.Addrel(glink) r.Off = int32(glink.Size) r.Sym = ld.Linklookup(ld.Ctxt, ".plt", 0) r.Siz = 8 r.Type = obj.R_ADDRPOWER // addis r11,0,.plt@ha; addi r11,r11,.plt@l r.Add = 0x3d600000<<32 | 0x396b0000 glink.Size += 8 // Load r12 = dynamic resolver address and r11 = DSO // identifier from the first two doublewords of the PLT. ld.Adduint32(ld.Ctxt, glink, 0xe98b0000) // ld r12,0(r11) ld.Adduint32(ld.Ctxt, glink, 0xe96b0008) // ld r11,8(r11) // Jump to the dynamic resolver ld.Adduint32(ld.Ctxt, glink, 0x7d8903a6) // mtctr r12 ld.Adduint32(ld.Ctxt, glink, 0x4e800420) // bctr // The symbol resolvers must immediately follow. // res_0: // Add DT_PPC64_GLINK .dynamic entry, which points to 32 bytes // before the first symbol resolver stub. 
s := ld.Linklookup(ld.Ctxt, ".dynamic", 0) ld.Elfwritedynentsymplus(s, ld.DT_PPC64_GLINK, glink, glink.Size-32) return glink } func asmb() { if ld.Debug['v'] != 0 { fmt.Fprintf(&ld.Bso, "%5.2f asmb\n", obj.Cputime()) } ld.Bso.Flush() if ld.Iself { ld.Asmbelfsetup() } sect := ld.Segtext.Sect ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff)) ld.Codeblk(int64(sect.Vaddr), int64(sect.Length)) for sect = sect.Next; sect != nil; sect = sect.Next { ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff)) ld.Datblk(int64(sect.Vaddr), int64(sect.Length)) } if ld.Segrodata.Filelen > 0 { if ld.Debug['v'] != 0 { fmt.Fprintf(&ld.Bso, "%5.2f rodatblk\n", obj.Cputime()) } ld.Bso.Flush() ld.Cseek(int64(ld.Segrodata.Fileoff)) ld.Datblk(int64(ld.Segrodata.Vaddr), int64(ld.Segrodata.Filelen)) } if ld.Debug['v'] != 0 { fmt.Fprintf(&ld.Bso, "%5.2f datblk\n", obj.Cputime()) } ld.Bso.Flush() ld.Cseek(int64(ld.Segdata.Fileoff)) ld.Datblk(int64(ld.Segdata.Vaddr), int64(ld.Segdata.Filelen)) /* output symbol table */ ld.Symsize = 0 ld.Lcsize = 0 symo := uint32(0) if ld.Debug['s'] == 0 { // TODO: rationalize if ld.Debug['v'] != 0 { fmt.Fprintf(&ld.Bso, "%5.2f sym\n", obj.Cputime()) } ld.Bso.Flush() switch ld.HEADTYPE { default: if ld.Iself { symo = uint32(ld.Segdata.Fileoff + ld.Segdata.Filelen) symo = uint32(ld.Rnd(int64(symo), int64(ld.INITRND))) } case obj.Hplan9: symo = uint32(ld.Segdata.Fileoff + ld.Segdata.Filelen) } ld.Cseek(int64(symo)) switch ld.HEADTYPE { default: if ld.Iself { if ld.Debug['v'] != 0 { fmt.Fprintf(&ld.Bso, "%5.2f elfsym\n", obj.Cputime()) } ld.Asmelfsym() ld.Cflush() ld.Cwrite(ld.Elfstrdat) if ld.Debug['v'] != 0 { fmt.Fprintf(&ld.Bso, "%5.2f dwarf\n", obj.Cputime()) } ld.Dwarfemitdebugsections() if ld.Linkmode == ld.LinkExternal { ld.Elfemitreloc() } } case obj.Hplan9: ld.Asmplan9sym() ld.Cflush() sym := ld.Linklookup(ld.Ctxt, "pclntab", 0) if sym != nil { ld.Lcsize = int32(len(sym.P)) for i := 0; int32(i) < ld.Lcsize; i++ { ld.Cput(uint8(sym.P[i])) } ld.Cflush() } } } ld.Ctxt.Cursym = nil if ld.Debug['v'] != 0 { fmt.Fprintf(&ld.Bso, "%5.2f header\n", obj.Cputime()) } ld.Bso.Flush() ld.Cseek(0) switch ld.HEADTYPE { default: case obj.Hplan9: /* plan 9 */ ld.Thearch.Lput(0x647) /* magic */ ld.Thearch.Lput(uint32(ld.Segtext.Filelen)) /* sizes */ ld.Thearch.Lput(uint32(ld.Segdata.Filelen)) ld.Thearch.Lput(uint32(ld.Segdata.Length - ld.Segdata.Filelen)) ld.Thearch.Lput(uint32(ld.Symsize)) /* nsyms */ ld.Thearch.Lput(uint32(ld.Entryvalue())) /* va of entry */ ld.Thearch.Lput(0) ld.Thearch.Lput(uint32(ld.Lcsize)) case obj.Hlinux, obj.Hfreebsd, obj.Hnetbsd, obj.Hopenbsd, obj.Hnacl: ld.Asmbelf(int64(symo)) } ld.Cflush() if ld.Debug['c'] != 0 { fmt.Printf("textsize=%d\n", ld.Segtext.Filelen) fmt.Printf("datsize=%d\n", ld.Segdata.Filelen) fmt.Printf("bsssize=%d\n", ld.Segdata.Length-ld.Segdata.Filelen) fmt.Printf("symsize=%d\n", ld.Symsize) fmt.Printf("lcsize=%d\n", ld.Lcsize) fmt.Printf("total=%d\n", ld.Segtext.Filelen+ld.Segdata.Length+uint64(ld.Symsize)+uint64(ld.Lcsize)) } }
l(s *ld.L
docker_test.go
/* Copyright 2018 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package gcb import ( "testing" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/schema/latest" "github.com/GoogleContainerTools/skaffold/pkg/skaffold/util" "github.com/GoogleContainerTools/skaffold/testutil" cloudbuild "google.golang.org/api/cloudbuild/v1" ) func TestDockerBuildDescription(t *testing.T) { artifact := &latest.Artifact{ ArtifactType: latest.ArtifactType{ DockerArtifact: &latest.DockerArtifact{ DockerfilePath: "Dockerfile", BuildArgs: map[string]*string{ "arg1": util.StringPtr("value1"), "arg2": nil, }, }, }, } builder := Builder{ GoogleCloudBuild: &latest.GoogleCloudBuild{ DockerImage: "docker/docker", DiskSizeGb: 100, MachineType: "n1-standard-1", Timeout: "10m", }, } desc, err := builder.buildDescription(artifact, "nginx", "bucket", "object") expected := cloudbuild.Build{ LogsBucket: "bucket", Source: &cloudbuild.Source{ StorageSource: &cloudbuild.StorageSource{ Bucket: "bucket", Object: "object", }, }, Steps: []*cloudbuild.BuildStep{{ Name: "docker/docker", Args: []string{"build", "--tag", "nginx", "-f", "Dockerfile", "--build-arg", "arg1=value1", "--build-arg", "arg2", "."}, }}, Images: []string{"nginx"}, Options: &cloudbuild.BuildOptions{ DiskSizeGb: 100, MachineType: "n1-standard-1", }, Timeout: "10m", } testutil.CheckErrorAndDeepEqual(t, false, err, expected, *desc) } func TestPullCacheFrom(t *testing.T)
{ artifact := &latest.DockerArtifact{ DockerfilePath: "Dockerfile", CacheFrom: []string{"from/image1", "from/image2"}, } builder := Builder{ GoogleCloudBuild: &latest.GoogleCloudBuild{ DockerImage: "docker/docker", }, } steps := builder.dockerBuildSteps(artifact, "nginx2") expected := []*cloudbuild.BuildStep{{ Name: "docker/docker", Entrypoint: "sh", Args: []string{"-c", "docker pull from/image1 || true"}, }, { Name: "docker/docker", Entrypoint: "sh", Args: []string{"-c", "docker pull from/image2 || true"}, }, { Name: "docker/docker", Args: []string{"build", "--tag", "nginx2", "-f", "Dockerfile", "--cache-from", "from/image1", "--cache-from", "from/image2", "."}, }} testutil.CheckDeepEqual(t, expected, steps) }