file_name   stringlengths   min 3   max 137
prefix      stringlengths   min 0   max 918k
suffix      stringlengths   min 0   max 962k
middle      stringlengths   min 0   max 812k
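The column summary above suggests a fill-in-the-middle (FIM) style layout: each row carries a file_name plus prefix, suffix, and middle strings, with the preview showing the prefix and suffix cells before the middle cell. A minimal sketch of how such a row would be reassembled, assuming the usual FIM convention that the original file text is prefix + middle + suffix; the example row is an invented illustration based on the utils.ts entry below, not an exact copy of a dataset row:

# Minimal sketch: reassemble a fill-in-the-middle row into the original file text.
# Assumes the conventional ordering original = prefix + middle + suffix; the
# example row (including its whitespace) is made up for illustration only.
def reconstruct(row: dict) -> str:
    return row["prefix"] + row["middle"] + row["suffix"]

example_row = {
    "file_name": "utils.ts",
    "prefix": "export function",        # code before the masked span
    "middle": " toMatchEndRegex",       # the span a model would be asked to fill in
    "suffix": "(s: string): RegExp {",  # code after the masked span
}
print(reconstruct(example_row))  # -> "export function toMatchEndRegex(s: string): RegExp {"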
render_resources.rs
use bevy_macro_utils::BevyManifest; use proc_macro::TokenStream; use quote::{format_ident, quote}; use syn::{ parse::ParseStream, parse_macro_input, punctuated::Punctuated, Data, DataStruct, DeriveInput, Field, Fields, Path, }; #[derive(Default)] struct RenderResourceFieldAttributes { pub ignore: bool, pub buffer: bool, } #[derive(Default)] struct RenderResourceAttributes { pub from_self: bool, } static RENDER_RESOURCE_ATTRIBUTE_NAME: &str = "render_resources"; pub fn derive_render_resources(input: TokenStream) -> TokenStream { let ast = parse_macro_input!(input as DeriveInput); let bevy_render_path: Path = BevyManifest::default().get_path(crate::modules::BEVY_RENDER); let attributes = ast .attrs .iter() .find(|a| *a.path.get_ident().as_ref().unwrap() == RENDER_RESOURCE_ATTRIBUTE_NAME) .map_or_else(RenderResourceAttributes::default, |a| { syn::custom_keyword!(from_self); let mut attributes = RenderResourceAttributes::default(); a.parse_args_with(|input: ParseStream| { if input.parse::<Option<from_self>>()?.is_some() { attributes.from_self = true; } Ok(()) }) .expect("Invalid 'render_resources' attribute format."); attributes }); let struct_name = &ast.ident; let (impl_generics, type_generics, where_clause) = &ast.generics.split_for_impl(); let struct_name_string = struct_name.to_string(); if attributes.from_self { TokenStream::from(quote! { impl #impl_generics #bevy_render_path::renderer::RenderResources for #struct_name #type_generics #where_clause { fn render_resources_len(&self) -> usize { 1 } fn get_render_resource(&self, index: usize) -> Option<&dyn #bevy_render_path::renderer::RenderResource> { if index == 0 { Some(self) } else { None } } fn get_render_resource_name(&self, index: usize) -> Option<&str> { if index == 0 { Some(#struct_name_string) } else { None } } fn iter(&self) -> #bevy_render_path::renderer::RenderResourceIterator { #bevy_render_path::renderer::RenderResourceIterator::new(self) } } }) } else { let empty = Punctuated::new(); let fields = match &ast.data { Data::Struct(DataStruct { fields: Fields::Named(fields), .. }) => &fields.named, Data::Struct(DataStruct { fields: Fields::Unit, .. }) => &empty, _ => panic!("Expected a struct with named fields."), }; let field_attributes = fields .iter() .map(|field| { ( field, field .attrs .iter() .find(|a| { *a.path.get_ident().as_ref().unwrap() == RENDER_RESOURCE_ATTRIBUTE_NAME }) .map_or_else(RenderResourceFieldAttributes::default, |a| { syn::custom_keyword!(ignore); syn::custom_keyword!(buffer); let mut attributes = RenderResourceFieldAttributes::default(); a.parse_args_with(|input: ParseStream| { if input.parse::<Option<ignore>>()?.is_some() { attributes.ignore = true; } else if input.parse::<Option<buffer>>()?.is_some() { attributes.buffer = true; } Ok(()) }) .expect("Invalid 'render_resources' attribute format."); attributes
let mut render_resource_names = Vec::new(); let mut render_resource_fields = Vec::new(); let mut render_resource_hints = Vec::new(); for (field, attrs) in field_attributes.iter() { if attrs.ignore { continue; } let field_ident = field.ident.as_ref().unwrap(); let field_name = field_ident.to_string(); render_resource_fields.push(field_ident); render_resource_names.push(format!("{}_{}", struct_name, field_name)); if attrs.buffer { render_resource_hints .push(quote! {Some(#bevy_render_path::renderer::RenderResourceHints::BUFFER)}) } else { render_resource_hints.push(quote! {None}) } } let render_resource_count = render_resource_names.len(); let render_resource_indices = 0..render_resource_count; let struct_name_uppercase = struct_name_string.to_uppercase(); let render_resource_names_ident = format_ident!("{}_RENDER_RESOURCE_NAMES", struct_name_uppercase); let render_resource_hints_ident = format_ident!("{}_RENDER_RESOURCE_HINTS", struct_name_uppercase); TokenStream::from(quote! { static #render_resource_names_ident: &[&str] = &[ #(#render_resource_names,)* ]; static #render_resource_hints_ident: &[Option<#bevy_render_path::renderer::RenderResourceHints>] = &[ #(#render_resource_hints,)* ]; impl #impl_generics #bevy_render_path::renderer::RenderResources for #struct_name #type_generics #where_clause { fn render_resources_len(&self) -> usize { #render_resource_count } fn get_render_resource(&self, index: usize) -> Option<&dyn #bevy_render_path::renderer::RenderResource> { match index { #(#render_resource_indices => Some(&self.#render_resource_fields),)* _ => None, } } fn get_render_resource_name(&self, index: usize) -> Option<&str> { #render_resource_names_ident.get(index).copied() } fn get_render_resource_hints(&self, index: usize) -> Option<#bevy_render_path::renderer::RenderResourceHints> { #render_resource_hints_ident.get(index).and_then(|o| *o) } fn iter(&self) -> #bevy_render_path::renderer::RenderResourceIterator { #bevy_render_path::renderer::RenderResourceIterator::new(self) } } }) } }
}), ) }) .collect::<Vec<(&Field, RenderResourceFieldAttributes)>>();
operation_group_test.go
package operations import ( "fmt" "io/ioutil" "testing" "time" "github.com/baking-bad/bcdhub/internal/cache" "github.com/baking-bad/bcdhub/internal/config" "github.com/baking-bad/bcdhub/internal/models" "github.com/baking-bad/bcdhub/internal/models/bigmapaction" "github.com/baking-bad/bcdhub/internal/models/bigmapdiff" modelContract "github.com/baking-bad/bcdhub/internal/models/contract" mock_general "github.com/baking-bad/bcdhub/internal/models/mock" mock_bmd "github.com/baking-bad/bcdhub/internal/models/mock/bigmapdiff" mock_block "github.com/baking-bad/bcdhub/internal/models/mock/block" mock_contract "github.com/baking-bad/bcdhub/internal/models/mock/contract" mock_token_balance "github.com/baking-bad/bcdhub/internal/models/mock/tokenbalance" mock_tzip "github.com/baking-bad/bcdhub/internal/models/mock/tzip" "github.com/baking-bad/bcdhub/internal/models/operation" "github.com/baking-bad/bcdhub/internal/models/protocol" "github.com/baking-bad/bcdhub/internal/models/tokenbalance" "github.com/baking-bad/bcdhub/internal/models/transfer" "github.com/baking-bad/bcdhub/internal/models/types" "github.com/baking-bad/bcdhub/internal/models/tzip" "github.com/baking-bad/bcdhub/internal/noderpc" "github.com/baking-bad/bcdhub/internal/parsers" "github.com/baking-bad/bcdhub/internal/parsers/contract" "github.com/golang/mock/gomock" ) func TestGroup_Parse(t *testing.T) { timestamp := time.Now() ctrlStorage := gomock.NewController(t) defer ctrlStorage.Finish() generalRepo := mock_general.NewMockGeneralRepository(ctrlStorage) ctrlBmdRepo := gomock.NewController(t) defer ctrlBmdRepo.Finish() bmdRepo := mock_bmd.NewMockRepository(ctrlBmdRepo) ctrlBlockRepo := gomock.NewController(t) defer ctrlBlockRepo.Finish() blockRepo := mock_block.NewMockRepository(ctrlBlockRepo) ctrlTzipRepo := gomock.NewController(t) defer ctrlTzipRepo.Finish() tzipRepo := mock_tzip.NewMockRepository(ctrlTzipRepo) ctrlContractRepo := gomock.NewController(t) defer ctrlContractRepo.Finish() contractRepo := mock_contract.NewMockRepository(ctrlContractRepo) ctrlTokenBalanceRepo := gomock.NewController(t) defer ctrlTokenBalanceRepo.Finish() tbRepo := mock_token_balance.NewMockRepository(ctrlTokenBalanceRepo) ctrlRPC := gomock.NewController(t) defer ctrlRPC.Finish() rpc := noderpc.NewMockINode(ctrlRPC) ctrlScriptSaver := gomock.NewController(t) defer ctrlScriptSaver.Finish() scriptSaver := contract.NewMockScriptSaver(ctrlScriptSaver) scriptSaver. EXPECT(). Save(gomock.Any(), gomock.Any()). Return(nil).AnyTimes() tzipRepo. EXPECT(). GetWithEvents(gomock.Any()). Return(make([]tzip.TZIP, 0), nil). AnyTimes() tzipRepo. EXPECT(). Get(gomock.Any(), gomock.Any()). Return(nil, nil). AnyTimes() contractRepo. EXPECT(). Get(gomock.Any(), gomock.Any()). DoAndReturn(readTestContractModel). AnyTimes() generalRepo. EXPECT(). Save(gomock.AssignableToTypeOf([]models.Model{})). Return(nil). AnyTimes() generalRepo. EXPECT(). IsRecordNotFound(gomock.Any()). Return(true). AnyTimes() bmdRepo. EXPECT(). GetByPtr( gomock.Eq(types.Carthagenet), gomock.Eq("KT1HBy1L43tiLe5MVJZ5RoxGy53Kx8kMgyoU"), gomock.Eq(int64(2416))). Return([]bigmapdiff.BigMapState{ { Ptr: 2416, Key: []byte(`{"bytes": "000085ef0c18b31983603d978a152de4cd61803db881"}`), KeyHash: "exprtfKNhZ1G8vMscchFjt1G1qww2P93VTLHMuhyThVYygZLdnRev2", Value: []byte(`{"prim":"Pair","args":[[],{"int":"6000"}]}`), LastUpdateLevel: 386026, Contract: "KT1HBy1L43tiLe5MVJZ5RoxGy53Kx8kMgyoU", Network: types.Carthagenet, LastUpdateTime: timestamp, }, }, nil). 
AnyTimes() for _, ptr := range []int{25167, 25166, 25165, 25164} { bmdRepo. EXPECT(). GetByPtr( gomock.Eq(types.Edo2net), gomock.Eq("KT1C2MfcjWb5R1ZDDxVULCsGuxrf5fEn5264"), gomock.Eq(int64(ptr))). Return([]bigmapdiff.BigMapState{}, nil). AnyTimes() } bmdRepo. EXPECT(). GetByPtr( gomock.Eq(types.Carthagenet), gomock.Eq("KT1Dc6A6jTY9sG4UvqKciqbJNAGtXqb4n7vZ"), gomock.Eq(int64(2417))). Return([]bigmapdiff.BigMapState{ { Ptr: 2417, Key: []byte(`{"bytes": "000085ef0c18b31983603d978a152de4cd61803db881"}`), KeyHash: "exprtfKNhZ1G8vMscchFjt1G1qww2P93VTLHMuhyThVYygZLdnRev2", Value: nil, LastUpdateLevel: 386026, Contract: "KT1Dc6A6jTY9sG4UvqKciqbJNAGtXqb4n7vZ", Network: types.Carthagenet, LastUpdateTime: timestamp, }, }, nil). AnyTimes() tests := []struct { name string rpc noderpc.INode ctx *config.Context paramsOpts []ParseParamsOption filename string storage map[string]int64 want *parsers.Result wantErr bool }{ { name: "opToHHcqFhRTQWJv2oTGAtywucj9KM1nDnk5eHsEETYJyvJLsa5", rpc: rpc, ctx: &config.Context{ Storage: generalRepo, Contracts: contractRepo, BigMapDiffs: bmdRepo, Blocks: blockRepo, TZIP: tzipRepo, TokenBalances: tbRepo, Cache: cache.NewCache(), }, paramsOpts: []ParseParamsOption{ WithHead(noderpc.Header{ Timestamp: timestamp, Protocol: "PsCARTHAGazKbHtnKfLzQg3kms52kSRpgnDY982a9oYsSXRLQEb", Level: 1068669, ChainID: "NetXdQprcVkpaWU", }), WithNetwork(types.Mainnet), }, filename: "./data/rpc/opg/opToHHcqFhRTQWJv2oTGAtywucj9KM1nDnk5eHsEETYJyvJLsa5.json", want: parsers.NewResult(), }, { name: "opJXaAMkBrAbd1XFd23kS8vXiw63tU4rLUcLrZgqUCpCbhT1Pn9", rpc: rpc, ctx: &config.Context{ Storage: generalRepo, Contracts: contractRepo, BigMapDiffs: bmdRepo, Blocks: blockRepo, TZIP: tzipRepo, TokenBalances: tbRepo, Cache: cache.NewCache(), SharePath: "./test", }, paramsOpts: []ParseParamsOption{ WithHead(noderpc.Header{ Timestamp: timestamp, Protocol: "PsCARTHAGazKbHtnKfLzQg3kms52kSRpgnDY982a9oYsSXRLQEb", Level: 1068669, ChainID: "test", }), WithConstants(protocol.Constants{ CostPerByte: 1000, HardGasLimitPerOperation: 1040000, HardStorageLimitPerOperation: 60000, TimeBetweenBlocks: 60, }), WithNetwork(types.Mainnet),
"KT1KemKUx79keZgFW756jQrqKcZJ21y4SPdS": 1068668, }, filename: "./data/rpc/opg/opJXaAMkBrAbd1XFd23kS8vXiw63tU4rLUcLrZgqUCpCbhT1Pn9.json", want: &parsers.Result{ Operations: []*operation.Operation{ { Kind: "transaction", Source: "tz1aSPEN4RTZbn4aXEsxDiix38dDmacGQ8sq", Fee: 37300, Counter: 5791164, GasLimit: 369423, StorageLimit: 90, Destination: "KT1S5iPRQ612wcNm6mXDqDhTNegGFcvTV7vM", Status: types.OperationStatusApplied, Level: 1068669, Network: types.Mainnet, Hash: "opJXaAMkBrAbd1XFd23kS8vXiw63tU4rLUcLrZgqUCpCbhT1Pn9", Entrypoint: "transfer", Timestamp: timestamp, Burned: 70000, Initiator: "tz1aSPEN4RTZbn4aXEsxDiix38dDmacGQ8sq", Protocol: "PsCARTHAGazKbHtnKfLzQg3kms52kSRpgnDY982a9oYsSXRLQEb", Parameters: []byte("{\"entrypoint\":\"default\",\"value\":{\"prim\":\"Right\",\"args\":[{\"prim\":\"Left\",\"args\":[{\"prim\":\"Right\",\"args\":[{\"prim\":\"Right\",\"args\":[{\"prim\":\"Pair\",\"args\":[{\"string\":\"tz1aSPEN4RTZbn4aXEsxDiix38dDmacGQ8sq\"},{\"prim\":\"Pair\",\"args\":[{\"string\":\"tz1invbJv3AEm55ct7QF2dVbWZuaDekssYkV\"},{\"int\":\"8010000\"}]}]}]}]}]}]}}"), DeffatedStorage: []byte("{\"prim\":\"Pair\",\"args\":[{\"prim\":\"Pair\",\"args\":[{\"prim\":\"Pair\",\"args\":[{\"prim\":\"Pair\",\"args\":[[{\"bytes\":\"000056d8b91b541c9d20d51f929dcccca2f14928f1dc\"}],{\"int\":\"62\"}]},{\"prim\":\"Pair\",\"args\":[{\"int\":\"63\"},{\"string\":\"Aspen Digital Token\"}]}]},{\"prim\":\"Pair\",\"args\":[{\"prim\":\"Pair\",\"args\":[{\"prim\":\"False\"},{\"bytes\":\"0000a2560a416161def96031630886abe950c4baf036\"}]},{\"prim\":\"Pair\",\"args\":[{\"prim\":\"False\"},{\"bytes\":\"010d25f77b84dc2164a5d1ce5e8a5d3ca2b1d0cbf900\"}]}]}]},{\"prim\":\"Pair\",\"args\":[{\"prim\":\"Pair\",\"args\":[{\"bytes\":\"01796ad78734892d5ae4186e84a30290040732ada700\"},{\"string\":\"ASPD\"}]},{\"int\":\"18000000\"}]}]}"), Tags: []string{"fa1-2"}, }, { Kind: "transaction", Source: "KT1S5iPRQ612wcNm6mXDqDhTNegGFcvTV7vM", Destination: "KT19nHqEWZxFFbbDL1b7Y86escgEN7qUShGo", Status: types.OperationStatusApplied, Level: 1068669, Counter: 5791164, Network: types.Mainnet, Hash: "opJXaAMkBrAbd1XFd23kS8vXiw63tU4rLUcLrZgqUCpCbhT1Pn9", Nonce: setInt64(0), Entrypoint: "validateAccounts", Internal: true, Timestamp: timestamp, Protocol: "PsCARTHAGazKbHtnKfLzQg3kms52kSRpgnDY982a9oYsSXRLQEb", Initiator: "tz1aSPEN4RTZbn4aXEsxDiix38dDmacGQ8sq", Parameters: []byte("{\"entrypoint\":\"validateAccounts\",\"value\":{\"prim\":\"Pair\",\"args\":[{\"prim\":\"Pair\",\"args\":[{\"prim\":\"Pair\",\"args\":[{\"prim\":\"Pair\",\"args\":[{\"bytes\":\"0000a2560a416161def96031630886abe950c4baf036\"},{\"bytes\":\"0000fdf98b65d53a9661e07f41093dcb6f3d931736ba\"}]},{\"prim\":\"Pair\",\"args\":[{\"int\":\"14151000\"},{\"int\":\"0\"}]}]},{\"prim\":\"Pair\",\"args\":[{\"prim\":\"True\"},{\"prim\":\"Pair\",\"args\":[{\"int\":\"8010000\"},{\"int\":\"18000000\"}]}]}]},{\"bytes\":\"01796ad78734892d5ae4186e84a30290040732ada70076616c696461746552756c6573\"}]}}"), DeffatedStorage: []byte("{\"int\":\"61\"}"), Tags: []string{}, }, { Kind: "transaction", Source: "KT19nHqEWZxFFbbDL1b7Y86escgEN7qUShGo", Destination: "KT1KemKUx79keZgFW756jQrqKcZJ21y4SPdS", Status: types.OperationStatusApplied, Level: 1068669, Counter: 5791164, Network: types.Mainnet, Hash: "opJXaAMkBrAbd1XFd23kS8vXiw63tU4rLUcLrZgqUCpCbhT1Pn9", Nonce: setInt64(1), Entrypoint: "validateRules", Internal: true, Timestamp: timestamp, Protocol: "PsCARTHAGazKbHtnKfLzQg3kms52kSRpgnDY982a9oYsSXRLQEb", Initiator: "tz1aSPEN4RTZbn4aXEsxDiix38dDmacGQ8sq", Parameters: 
[]byte("{\"entrypoint\":\"validateRules\",\"value\":{\"prim\":\"Pair\",\"args\":[{\"prim\":\"Pair\",\"args\":[{\"prim\":\"Pair\",\"args\":[{\"prim\":\"Pair\",\"args\":[{\"prim\":\"Pair\",\"args\":[{\"prim\":\"Pair\",\"args\":[{\"prim\":\"Pair\",\"args\":[{\"prim\":\"None\"},{\"string\":\"US\"}]},{\"prim\":\"Pair\",\"args\":[{\"prim\":\"False\"},{\"bytes\":\"000056d8b91b541c9d20d51f929dcccca2f14928f1dc\"}]}]},{\"int\":\"2\"}]},{\"prim\":\"Pair\",\"args\":[{\"prim\":\"Pair\",\"args\":[{\"prim\":\"Pair\",\"args\":[{\"prim\":\"None\"},{\"string\":\"US\"}]},{\"prim\":\"Pair\",\"args\":[{\"prim\":\"False\"},{\"bytes\":\"0000c644b537bdb0dac40fe742010106546effd69395\"}]}]},{\"int\":\"6\"}]}]},{\"prim\":\"Pair\",\"args\":[{\"bytes\":\"0000a2560a416161def96031630886abe950c4baf036\"},{\"bytes\":\"0000fdf98b65d53a9661e07f41093dcb6f3d931736ba\"}]}]},{\"prim\":\"Pair\",\"args\":[{\"prim\":\"Pair\",\"args\":[{\"int\":\"14151000\"},{\"int\":\"0\"}]},{\"prim\":\"True\"}]}]},{\"prim\":\"Pair\",\"args\":[{\"bytes\":\"01bff38c4e363eacef338f7b2e15f00ca42fafa1ce00\"},{\"prim\":\"Pair\",\"args\":[{\"int\":\"8010000\"},{\"int\":\"18000000\"}]}]}]}}"), DeffatedStorage: []byte("{\"prim\":\"Pair\",\"args\":[{\"prim\":\"Pair\",\"args\":[{\"bytes\":\"000056d8b91b541c9d20d51f929dcccca2f14928f1dc\"},{\"bytes\":\"010d25f77b84dc2164a5d1ce5e8a5d3ca2b1d0cbf900\"}]},[]]}"), Tags: []string{}, }, }, BigMapDiffs: []*bigmapdiff.BigMapDiff{ { Ptr: 63, KeyHash: "exprum2qtFLPHdeLWVasKCDw7YD5MrdiD4ra52PY2AUazaNGKyv6tx", Key: []byte(`{"bytes":"0000a2560a416161def96031630886abe950c4baf036"}`), Value: []byte(`{"int":"6141000"}`), Level: 1068669, Network: types.Mainnet, Contract: "KT1S5iPRQ612wcNm6mXDqDhTNegGFcvTV7vM", Protocol: "PsCARTHAGazKbHtnKfLzQg3kms52kSRpgnDY982a9oYsSXRLQEb", Timestamp: timestamp, }, { Ptr: 63, KeyHash: "exprv2snyFbF6EDZd2YAHnnmNBoFt7bbaXhGSWGXHv4a4wnxS359ob", Key: []byte(`{"bytes":"0000fdf98b65d53a9661e07f41093dcb6f3d931736ba"}`), Value: []byte(`{"int":"8010000"}`), Level: 1068669, Network: types.Mainnet, Contract: "KT1S5iPRQ612wcNm6mXDqDhTNegGFcvTV7vM", Protocol: "PsCARTHAGazKbHtnKfLzQg3kms52kSRpgnDY982a9oYsSXRLQEb", Timestamp: timestamp, }, }, BigMapState: []*bigmapdiff.BigMapState{ { Ptr: 63, KeyHash: "exprum2qtFLPHdeLWVasKCDw7YD5MrdiD4ra52PY2AUazaNGKyv6tx", Key: []byte(`{"bytes":"0000a2560a416161def96031630886abe950c4baf036"}`), Value: []byte(`{"int":"6141000"}`), Network: types.Mainnet, Contract: "KT1S5iPRQ612wcNm6mXDqDhTNegGFcvTV7vM", LastUpdateLevel: 1068669, LastUpdateTime: timestamp, }, { Ptr: 63, KeyHash: "exprv2snyFbF6EDZd2YAHnnmNBoFt7bbaXhGSWGXHv4a4wnxS359ob", Key: []byte(`{"bytes":"0000fdf98b65d53a9661e07f41093dcb6f3d931736ba"}`), Value: []byte(`{"int":"8010000"}`), Network: types.Mainnet, Contract: "KT1S5iPRQ612wcNm6mXDqDhTNegGFcvTV7vM", LastUpdateLevel: 1068669, LastUpdateTime: timestamp, }, }, Transfers: []*transfer.Transfer{ { Network: types.Mainnet, Contract: "KT1S5iPRQ612wcNm6mXDqDhTNegGFcvTV7vM", Initiator: "tz1aSPEN4RTZbn4aXEsxDiix38dDmacGQ8sq", Hash: "opJXaAMkBrAbd1XFd23kS8vXiw63tU4rLUcLrZgqUCpCbhT1Pn9", Status: types.OperationStatusApplied, Timestamp: timestamp, Level: 1068669, From: "tz1aSPEN4RTZbn4aXEsxDiix38dDmacGQ8sq", To: "tz1invbJv3AEm55ct7QF2dVbWZuaDekssYkV", TokenID: 0, Amount: newDecimal("8010000"), Counter: 5791164, }, }, TokenBalances: []*tokenbalance.TokenBalance{ { Network: types.Mainnet, Contract: "KT1S5iPRQ612wcNm6mXDqDhTNegGFcvTV7vM", Address: "tz1aSPEN4RTZbn4aXEsxDiix38dDmacGQ8sq", TokenID: 0, Balance: newDecimal("-8010000"), }, { Network: types.Mainnet, Contract: 
"KT1S5iPRQ612wcNm6mXDqDhTNegGFcvTV7vM", Address: "tz1invbJv3AEm55ct7QF2dVbWZuaDekssYkV", TokenID: 0, Balance: newDecimal("8010000"), }, }, }, }, { name: "opPUPCpQu6pP38z9TkgFfwLiqVBFGSWQCH8Z2PUL3jrpxqJH5gt", rpc: rpc, ctx: &config.Context{ Storage: generalRepo, Contracts: contractRepo, BigMapDiffs: bmdRepo, Blocks: blockRepo, TZIP: tzipRepo, TokenBalances: tbRepo, Cache: cache.NewCache(), SharePath: "./test", }, paramsOpts: []ParseParamsOption{ WithHead(noderpc.Header{ Timestamp: timestamp, Protocol: "PsCARTHAGazKbHtnKfLzQg3kms52kSRpgnDY982a9oYsSXRLQEb", Level: 1151495, ChainID: "test", }), WithConstants(protocol.Constants{ CostPerByte: 1000, HardGasLimitPerOperation: 1040000, HardStorageLimitPerOperation: 60000, TimeBetweenBlocks: 60, }), WithNetwork(types.Mainnet), }, storage: map[string]int64{ "KT1Ap287P1NzsnToSJdA4aqSNjPomRaHBZSr": 1151494, "KT1PWx2mnDueood7fEmfbBDKx1D9BAnnXitn": 1151494, }, filename: "./data/rpc/opg/opPUPCpQu6pP38z9TkgFfwLiqVBFGSWQCH8Z2PUL3jrpxqJH5gt.json", want: &parsers.Result{ Operations: []*operation.Operation{ { ContentIndex: 0, Network: types.Mainnet, Protocol: "PsCARTHAGazKbHtnKfLzQg3kms52kSRpgnDY982a9oYsSXRLQEb", Hash: "opPUPCpQu6pP38z9TkgFfwLiqVBFGSWQCH8Z2PUL3jrpxqJH5gt", Internal: false, Nonce: nil, Status: types.OperationStatusApplied, Timestamp: timestamp, Level: 1151495, Kind: "transaction", Initiator: "tz1dMH7tW7RhdvVMR4wKVFF1Ke8m8ZDvrTTE", Source: "tz1dMH7tW7RhdvVMR4wKVFF1Ke8m8ZDvrTTE", Fee: 43074, Counter: 6909186, GasLimit: 427673, StorageLimit: 47, Destination: "KT1Ap287P1NzsnToSJdA4aqSNjPomRaHBZSr", Parameters: []byte("{\"entrypoint\":\"redeem\",\"value\":{\"bytes\":\"a874aac22777351417c9bde0920cc7ed33e54453e1dd149a1f3a60521358d19a\"}}"), Entrypoint: "redeem", DeffatedStorage: []byte("{\"prim\":\"Pair\",\"args\":[{\"int\":\"32\"},{\"prim\":\"Unit\"}]}"), Tags: []string{}, }, { ContentIndex: 0, Network: types.Mainnet, Protocol: "PsCARTHAGazKbHtnKfLzQg3kms52kSRpgnDY982a9oYsSXRLQEb", Hash: "opPUPCpQu6pP38z9TkgFfwLiqVBFGSWQCH8Z2PUL3jrpxqJH5gt", Internal: true, Nonce: setInt64(0), Status: types.OperationStatusApplied, Timestamp: timestamp, Level: 1151495, Kind: "transaction", Initiator: "tz1dMH7tW7RhdvVMR4wKVFF1Ke8m8ZDvrTTE", Source: "KT1Ap287P1NzsnToSJdA4aqSNjPomRaHBZSr", Counter: 6909186, Destination: "KT1PWx2mnDueood7fEmfbBDKx1D9BAnnXitn", Parameters: []byte("{\"entrypoint\":\"transfer\",\"value\":{\"prim\":\"Pair\",\"args\":[{\"bytes\":\"011871cfab6dafee00330602b4342b6500c874c93b00\"},{\"prim\":\"Pair\",\"args\":[{\"bytes\":\"0000c2473c617946ce7b9f6843f193401203851cb2ec\"},{\"int\":\"7874880\"}]}]}}"), Entrypoint: "transfer", Burned: 47000, DeffatedStorage: 
[]byte("{\"prim\":\"Pair\",\"args\":[{\"int\":\"31\"},{\"prim\":\"Pair\",\"args\":[[{\"prim\":\"DUP\"},{\"prim\":\"CAR\"},{\"prim\":\"DIP\",\"args\":[[{\"prim\":\"CDR\"}]]},{\"prim\":\"DUP\"},{\"prim\":\"DUP\"},{\"prim\":\"CAR\"},{\"prim\":\"DIP\",\"args\":[[{\"prim\":\"CDR\"}]]},{\"prim\":\"DIP\",\"args\":[[{\"prim\":\"DIP\",\"args\":[{\"int\":\"2\"},[{\"prim\":\"DUP\"}]]},{\"prim\":\"DIG\",\"args\":[{\"int\":\"2\"}]}]]},{\"prim\":\"PUSH\",\"args\":[{\"prim\":\"string\"},{\"string\":\"code\"}]},{\"prim\":\"PAIR\"},{\"prim\":\"PACK\"},{\"prim\":\"GET\"},{\"prim\":\"IF_NONE\",\"args\":[[{\"prim\":\"NONE\",\"args\":[{\"prim\":\"lambda\",\"args\":[{\"prim\":\"pair\",\"args\":[{\"prim\":\"bytes\"},{\"prim\":\"big_map\",\"args\":[{\"prim\":\"bytes\"},{\"prim\":\"bytes\"}]}]},{\"prim\":\"pair\",\"args\":[{\"prim\":\"list\",\"args\":[{\"prim\":\"operation\"}]},{\"prim\":\"big_map\",\"args\":[{\"prim\":\"bytes\"},{\"prim\":\"bytes\"}]}]}]}]}],[{\"prim\":\"UNPACK\",\"args\":[{\"prim\":\"lambda\",\"args\":[{\"prim\":\"pair\",\"args\":[{\"prim\":\"bytes\"},{\"prim\":\"big_map\",\"args\":[{\"prim\":\"bytes\"},{\"prim\":\"bytes\"}]}]},{\"prim\":\"pair\",\"args\":[{\"prim\":\"list\",\"args\":[{\"prim\":\"operation\"}]},{\"prim\":\"big_map\",\"args\":[{\"prim\":\"bytes\"},{\"prim\":\"bytes\"}]}]}]}]},{\"prim\":\"IF_NONE\",\"args\":[[{\"prim\":\"PUSH\",\"args\":[{\"prim\":\"string\"},{\"string\":\"UStore: failed to unpack code\"}]},{\"prim\":\"FAILWITH\"}],[]]},{\"prim\":\"SOME\"}]]},{\"prim\":\"IF_NONE\",\"args\":[[{\"prim\":\"DROP\"},{\"prim\":\"DIP\",\"args\":[[{\"prim\":\"DUP\"},{\"prim\":\"PUSH\",\"args\":[{\"prim\":\"bytes\"},{\"bytes\":\"05010000000866616c6c6261636b\"}]},{\"prim\":\"GET\"},{\"prim\":\"IF_NONE\",\"args\":[[{\"prim\":\"PUSH\",\"args\":[{\"prim\":\"string\"},{\"string\":\"UStore: no field fallback\"}]},{\"prim\":\"FAILWITH\"}],[]]},{\"prim\":\"UNPACK\",\"args\":[{\"prim\":\"lambda\",\"args\":[{\"prim\":\"pair\",\"args\":[{\"prim\":\"pair\",\"args\":[{\"prim\":\"string\"},{\"prim\":\"bytes\"}]},{\"prim\":\"big_map\",\"args\":[{\"prim\":\"bytes\"},{\"prim\":\"bytes\"}]}]},{\"prim\":\"pair\",\"args\":[{\"prim\":\"list\",\"args\":[{\"prim\":\"operation\"}]},{\"prim\":\"big_map\",\"args\":[{\"prim\":\"bytes\"},{\"prim\":\"bytes\"}]}]}]}]},{\"prim\":\"IF_NONE\",\"args\":[[{\"prim\":\"PUSH\",\"args\":[{\"prim\":\"string\"},{\"string\":\"UStore: failed to unpack fallback\"}]},{\"prim\":\"FAILWITH\"}],[]]},{\"prim\":\"SWAP\"}]]},{\"prim\":\"PAIR\"},{\"prim\":\"EXEC\"}],[{\"prim\":\"DIP\",\"args\":[[{\"prim\":\"SWAP\"},{\"prim\":\"DROP\"},{\"prim\":\"PAIR\"}]]},{\"prim\":\"SWAP\"},{\"prim\":\"EXEC\"}]]}],{\"prim\":\"Pair\",\"args\":[{\"int\":\"1\"},{\"prim\":\"False\"}]}]}]}"), Tags: []string{"fa1-2"}, }, }, BigMapDiffs: []*bigmapdiff.BigMapDiff{ { Ptr: 32, Key: []byte(`{"bytes": "80729e85e284dff3a30bb24a58b37ccdf474bbbe7794aad439ba034f48d66af3"}`), KeyHash: "exprvJp4s8RJpoXMwD9aQujxWQUiojrkeubesi3X9LDcU3taDfahYR", Level: 1151495, Contract: "KT1Ap287P1NzsnToSJdA4aqSNjPomRaHBZSr", Network: types.Mainnet, Timestamp: timestamp, Protocol: "PsCARTHAGazKbHtnKfLzQg3kms52kSRpgnDY982a9oYsSXRLQEb", }, { Ptr: 31, Key: []byte(`{"bytes":"05010000000b746f74616c537570706c79"}`), KeyHash: "exprunzteC5uyXRHbKnqJd3hUMGTWE9Gv5EtovDZHnuqu6SaGViV3N", Value: []byte(`{"bytes":"050098e1e8d78a02"}`), Level: 1151495, Contract: "KT1PWx2mnDueood7fEmfbBDKx1D9BAnnXitn", Network: types.Mainnet, Timestamp: timestamp, Protocol: "PsCARTHAGazKbHtnKfLzQg3kms52kSRpgnDY982a9oYsSXRLQEb", }, { Ptr: 31, Key: 
[]byte(`{"bytes":"05070701000000066c65646765720a000000160000c2473c617946ce7b9f6843f193401203851cb2ec"}`), KeyHash: "exprv9xaiXBb9KBi67dQoP1SchDyZeKEz3XHiFwBCtHadiKS8wkX7w", Value: []byte(`{"bytes":"0507070080a5c1070200000000"}`), Level: 1151495, Contract: "KT1PWx2mnDueood7fEmfbBDKx1D9BAnnXitn", Network: types.Mainnet, Timestamp: timestamp, Protocol: "PsCARTHAGazKbHtnKfLzQg3kms52kSRpgnDY982a9oYsSXRLQEb", }, { Ptr: 31, Key: []byte(`{"bytes":"05070701000000066c65646765720a00000016011871cfab6dafee00330602b4342b6500c874c93b00"}`), KeyHash: "expruiWsykU9wjNb4aV7eJULLBpGLhy1EuzgD8zB8k7eUTaCk16fyV", Value: []byte(`{"bytes":"05070700ba81bb090200000000"}`), Level: 1151495, Contract: "KT1PWx2mnDueood7fEmfbBDKx1D9BAnnXitn", Network: types.Mainnet, Timestamp: timestamp, Protocol: "PsCARTHAGazKbHtnKfLzQg3kms52kSRpgnDY982a9oYsSXRLQEb", }, }, BigMapState: []*bigmapdiff.BigMapState{ { Ptr: 32, Key: []byte(`{"bytes":"80729e85e284dff3a30bb24a58b37ccdf474bbbe7794aad439ba034f48d66af3"}`), KeyHash: "exprvJp4s8RJpoXMwD9aQujxWQUiojrkeubesi3X9LDcU3taDfahYR", Contract: "KT1Ap287P1NzsnToSJdA4aqSNjPomRaHBZSr", Network: types.Mainnet, Removed: true, LastUpdateLevel: 1151495, LastUpdateTime: timestamp, }, { Ptr: 31, Key: []byte(`{"bytes":"05010000000b746f74616c537570706c79"}`), KeyHash: "exprunzteC5uyXRHbKnqJd3hUMGTWE9Gv5EtovDZHnuqu6SaGViV3N", Value: []byte(`{"bytes":"050098e1e8d78a02"}`), Contract: "KT1PWx2mnDueood7fEmfbBDKx1D9BAnnXitn", Network: types.Mainnet, LastUpdateLevel: 1151495, LastUpdateTime: timestamp, }, { Ptr: 31, Key: []byte(`{"bytes":"05070701000000066c65646765720a000000160000c2473c617946ce7b9f6843f193401203851cb2ec"}`), KeyHash: "exprv9xaiXBb9KBi67dQoP1SchDyZeKEz3XHiFwBCtHadiKS8wkX7w", Value: []byte(`{"bytes":"0507070080a5c1070200000000"}`), Contract: "KT1PWx2mnDueood7fEmfbBDKx1D9BAnnXitn", Network: types.Mainnet, LastUpdateLevel: 1151495, LastUpdateTime: timestamp, }, { Ptr: 31, Key: []byte(`{"bytes":"05070701000000066c65646765720a00000016011871cfab6dafee00330602b4342b6500c874c93b00"}`), KeyHash: "expruiWsykU9wjNb4aV7eJULLBpGLhy1EuzgD8zB8k7eUTaCk16fyV", Value: []byte(`{"bytes":"05070700ba81bb090200000000"}`), Contract: "KT1PWx2mnDueood7fEmfbBDKx1D9BAnnXitn", Network: types.Mainnet, LastUpdateLevel: 1151495, LastUpdateTime: timestamp, }, }, Transfers: []*transfer.Transfer{ { Network: types.Mainnet, Contract: "KT1PWx2mnDueood7fEmfbBDKx1D9BAnnXitn", Initiator: "KT1Ap287P1NzsnToSJdA4aqSNjPomRaHBZSr", Hash: "opPUPCpQu6pP38z9TkgFfwLiqVBFGSWQCH8Z2PUL3jrpxqJH5gt", Status: types.OperationStatusApplied, Timestamp: timestamp, Level: 1151495, From: "KT1Ap287P1NzsnToSJdA4aqSNjPomRaHBZSr", To: "tz1dMH7tW7RhdvVMR4wKVFF1Ke8m8ZDvrTTE", TokenID: 0, Amount: newDecimal("7874880"), Counter: 6909186, Nonce: setInt64(0), }, }, TokenBalances: []*tokenbalance.TokenBalance{ { Network: types.Mainnet, Contract: "KT1PWx2mnDueood7fEmfbBDKx1D9BAnnXitn", Address: "KT1Ap287P1NzsnToSJdA4aqSNjPomRaHBZSr", TokenID: 0, Balance: newDecimal("-7874880"), }, { Network: types.Mainnet, Contract: "KT1PWx2mnDueood7fEmfbBDKx1D9BAnnXitn", Address: "tz1dMH7tW7RhdvVMR4wKVFF1Ke8m8ZDvrTTE", TokenID: 0, Balance: newDecimal("7874880"), }, }, }, }, { name: "onzUDQhwunz2yqzfEsoURXEBz9p7Gk8DgY4QBva52Z4b3AJCZjt", rpc: rpc, ctx: &config.Context{ Storage: generalRepo, Contracts: contractRepo, BigMapDiffs: bmdRepo, Blocks: blockRepo, TZIP: tzipRepo, TokenBalances: tbRepo, Cache: cache.NewCache(), SharePath: "./test", }, paramsOpts: []ParseParamsOption{ WithHead(noderpc.Header{ Timestamp: timestamp, Protocol: 
"PsDELPH1Kxsxt8f9eWbxQeRxkjfbxoqM52jvs5Y5fBxWWh4ifpo", Level: 86142, ChainID: "test", }), WithConstants(protocol.Constants{ CostPerByte: 250, HardGasLimitPerOperation: 1040000, HardStorageLimitPerOperation: 60000, TimeBetweenBlocks: 30, }), WithNetwork(types.Delphinet), }, storage: map[string]int64{ "KT1NppzrgyLZD3aku7fssfhYPm5QqZwyabvR": 86142, }, filename: "./data/rpc/opg/onzUDQhwunz2yqzfEsoURXEBz9p7Gk8DgY4QBva52Z4b3AJCZjt.json", want: &parsers.Result{ Operations: []*operation.Operation{ { ContentIndex: 0, Network: types.Delphinet, Protocol: "PsDELPH1Kxsxt8f9eWbxQeRxkjfbxoqM52jvs5Y5fBxWWh4ifpo", Hash: "onzUDQhwunz2yqzfEsoURXEBz9p7Gk8DgY4QBva52Z4b3AJCZjt", Internal: false, Status: types.OperationStatusApplied, Timestamp: timestamp, Level: 86142, Kind: "origination", Initiator: "tz1SX7SPdx4ZJb6uP5Hh5XBVZhh9wTfFaud3", Source: "tz1SX7SPdx4ZJb6uP5Hh5XBVZhh9wTfFaud3", Fee: 510, Counter: 654594, GasLimit: 1870, StorageLimit: 371, Amount: 0, Destination: "KT1NppzrgyLZD3aku7fssfhYPm5QqZwyabvR", Burned: 87750, AllocatedDestinationContractBurned: 64250, DeffatedStorage: []byte("{\"int\":\"0\"}\n"), Tags: nil, }, }, Contracts: []*modelContract.Contract{ { Network: types.Delphinet, Level: 86142, Timestamp: timestamp, Language: "unknown", Hash: "e4b88b53b9227b3fc4fc0dbe148f249a7a1c755cf4cbc9c8fb5b5b78395a139d3f8e0fde5c27117df30553e98ecb4e3e8ddc9740292af18fbf36326cb55cebad", Tags: []string{}, Entrypoints: []string{"decrement", "increment"}, Address: "KT1NppzrgyLZD3aku7fssfhYPm5QqZwyabvR", Manager: "tz1SX7SPdx4ZJb6uP5Hh5XBVZhh9wTfFaud3", }, }, }, }, { name: "onv6Q1dNejAGEJeQzwRannWsDSGw85FuFdhLnBrY18TBcC9p8kC", rpc: rpc, ctx: &config.Context{ Storage: generalRepo, Contracts: contractRepo, BigMapDiffs: bmdRepo, Blocks: blockRepo, TZIP: tzipRepo, TokenBalances: tbRepo, Cache: cache.NewCache(), SharePath: "./test", }, paramsOpts: []ParseParamsOption{ WithHead(noderpc.Header{ Timestamp: timestamp, Protocol: "PsddFKi32cMJ2qPjf43Qv5GDWLDPZb3T3bF6fLKiF5HtvHNU7aP", Level: 301436, ChainID: "test", }), WithConstants(protocol.Constants{ CostPerByte: 1000, HardGasLimitPerOperation: 400000, HardStorageLimitPerOperation: 60000, TimeBetweenBlocks: 60, }), WithNetwork(types.Mainnet), }, storage: map[string]int64{ "KT1AbjG7vtpV8osdoJXcMRck8eTwst8dWoz4": 301436, }, filename: "./data/rpc/opg/onv6Q1dNejAGEJeQzwRannWsDSGw85FuFdhLnBrY18TBcC9p8kC.json", want: &parsers.Result{ Operations: []*operation.Operation{ { Kind: "origination", Source: "tz1MXrEgDNnR8PDryN8sq4B2m9Pqcf57wBqM", Fee: 1555, Counter: 983250, GasLimit: 12251, StorageLimit: 351, Destination: "KT1AbjG7vtpV8osdoJXcMRck8eTwst8dWoz4", Status: types.OperationStatusApplied, Level: 301436, Network: types.Mainnet, Hash: "onv6Q1dNejAGEJeQzwRannWsDSGw85FuFdhLnBrY18TBcC9p8kC", Timestamp: timestamp, Burned: 331000, Initiator: "tz1MXrEgDNnR8PDryN8sq4B2m9Pqcf57wBqM", Protocol: "PsddFKi32cMJ2qPjf43Qv5GDWLDPZb3T3bF6fLKiF5HtvHNU7aP", DeffatedStorage: []byte("[]"), AllocatedDestinationContractBurned: 257000, }, }, Contracts: []*modelContract.Contract{ { Network: types.Mainnet, Level: 301436, Timestamp: timestamp, Language: "unknown", Hash: "0569cf67a58ae603cbfa740c3181b588608f8967e8a7d1ea49e00c9325e9e1b67dc32cd1ec1f9cdc73699dd793ded16ac6f14511b61b63240e8f647b3aed17a3", Tags: []string{}, Entrypoints: []string{"default"}, Address: "KT1AbjG7vtpV8osdoJXcMRck8eTwst8dWoz4", Manager: "tz1MXrEgDNnR8PDryN8sq4B2m9Pqcf57wBqM", }, }, }, }, { name: "op4fFMvYsxvSUKZmLWC7aUf25VMYqigaDwTZCAoBBi8zACbHTNg", rpc: rpc, ctx: &config.Context{ Storage: generalRepo, Contracts: contractRepo, 
BigMapDiffs: bmdRepo, Blocks: blockRepo, TZIP: tzipRepo, TokenBalances: tbRepo, Cache: cache.NewCache(), SharePath: "./test", }, paramsOpts: []ParseParamsOption{ WithHead(noderpc.Header{ Timestamp: timestamp, Protocol: "PtEdo2ZkT9oKpimTah6x2embF25oss54njMuPzkJTEi5RqfdZFA", Level: 72207, ChainID: "test", }), WithConstants(protocol.Constants{ CostPerByte: 1000, HardGasLimitPerOperation: 400000, HardStorageLimitPerOperation: 60000, TimeBetweenBlocks: 60, }), WithNetwork(types.Edo2net), }, storage: map[string]int64{ "KT1C2MfcjWb5R1ZDDxVULCsGuxrf5fEn5264": 72206, "KT1JgHoXtZPjVfG82BY3FSys2VJhKVZo2EJU": 72207, }, filename: "./data/rpc/opg/op4fFMvYsxvSUKZmLWC7aUf25VMYqigaDwTZCAoBBi8zACbHTNg.json", want: &parsers.Result{ Operations: []*operation.Operation{ { Kind: "transaction", Source: "tz1gXhGAXgKvrXjn4t16rYUXocqbch1XXJFN", Fee: 4045, Counter: 155670, GasLimit: 37831, StorageLimit: 5265, Destination: "KT1C2MfcjWb5R1ZDDxVULCsGuxrf5fEn5264", Status: types.OperationStatusApplied, Level: 72207, Network: types.Edo2net, Hash: "op4fFMvYsxvSUKZmLWC7aUf25VMYqigaDwTZCAoBBi8zACbHTNg", Timestamp: timestamp, Entrypoint: "@entrypoint_1", Initiator: "tz1gXhGAXgKvrXjn4t16rYUXocqbch1XXJFN", Parameters: []byte("{\"entrypoint\":\"default\",\"value\":{\"prim\":\"Right\",\"args\":[{\"prim\":\"Unit\"}]}}"), Protocol: "PtEdo2ZkT9oKpimTah6x2embF25oss54njMuPzkJTEi5RqfdZFA", DeffatedStorage: []byte("{\"prim\":\"Pair\",\"args\":[{\"bytes\":\"0000e527ed176ccf8f8297f674a9886a2ba8a55818d9\"},{\"prim\":\"Left\",\"args\":[{\"bytes\":\"016ebc941b2ae4e305470f392fa050e41ca1e52b4500\"}]}]}"), }, { Kind: "origination", Source: "KT1C2MfcjWb5R1ZDDxVULCsGuxrf5fEn5264", Nonce: setInt64(0), Destination: "KT1JgHoXtZPjVfG82BY3FSys2VJhKVZo2EJU", Status: types.OperationStatusApplied, Level: 72207, Network: types.Edo2net, Hash: "op4fFMvYsxvSUKZmLWC7aUf25VMYqigaDwTZCAoBBi8zACbHTNg", Timestamp: timestamp, Burned: 5245000, Counter: 155670, Internal: true, Initiator: "tz1gXhGAXgKvrXjn4t16rYUXocqbch1XXJFN", Protocol: "PtEdo2ZkT9oKpimTah6x2embF25oss54njMuPzkJTEi5RqfdZFA", AllocatedDestinationContractBurned: 257000, DeffatedStorage: []byte("{\"prim\":\"Pair\",\"args\":[{\"prim\":\"Pair\",\"args\":[{\"prim\":\"Pair\",\"args\":[{\"prim\":\"Pair\",\"args\":[{\"prim\":\"Pair\",\"args\":[{\"string\":\"tz1QozfhaUW4wLnohDo6yiBUmh7cPCSXE9Af\"},[]]},{\"int\":\"25168\"},{\"int\":\"25169\"}]},{\"prim\":\"Pair\",\"args\":[{\"prim\":\"Left\",\"args\":[{\"prim\":\"Unit\"}]},{\"int\":\"25170\"}]},{\"string\":\"tz1QozfhaUW4wLnohDo6yiBUmh7cPCSXE9Af\"},{\"int\":\"0\"}]},{\"prim\":\"Pair\",\"args\":[{\"prim\":\"Pair\",\"args\":[[],{\"int\":\"25171\"}]},{\"int\":\"2\"},{\"string\":\"tz1QozfhaUW4wLnohDo6yiBUmh7cPCSXE9Af\"}]},{\"int\":\"11\"}]},{\"prim\":\"Pair\",\"args\":[{\"prim\":\"Pair\",\"args\":[{\"prim\":\"Pair\",\"args\":[[],[[{\"prim\":\"DUP\"},{\"prim\":\"CAR\"},{\"prim\":\"DIP\",\"args\":[[{\"prim\":\"CDR\"}]]}],{\"prim\":\"DROP\"},{\"prim\":\"NIL\",\"args\":[{\"prim\":\"operation\"}]},{\"prim\":\"PAIR\"}]]},{\"int\":\"500\"},{\"int\":\"1000\"}]},{\"prim\":\"Pair\",\"args\":[{\"int\":\"1000\"},{\"int\":\"2592000\"}]},{\"int\":\"1\"},{\"int\":\"1\"}]},[{\"prim\":\"DROP\"},{\"prim\":\"PUSH\",\"args\":[{\"prim\":\"bool\"},{\"prim\":\"True\"}]}],[{\"prim\":\"DROP\"},{\"prim\":\"PUSH\",\"args\":[{\"prim\":\"nat\"},{\"int\":\"0\"}]}]]}"), }, }, BigMapActions: []*bigmapaction.BigMapAction{ { Action: "remove", SourcePtr: setInt64(25167), Level: 72207, Address: "KT1C2MfcjWb5R1ZDDxVULCsGuxrf5fEn5264", Network: types.Edo2net, Timestamp: timestamp, }, { Action: "remove", 
SourcePtr: setInt64(25166), Level: 72207, Address: "KT1C2MfcjWb5R1ZDDxVULCsGuxrf5fEn5264", Network: types.Edo2net, Timestamp: timestamp, }, { Action: "remove", SourcePtr: setInt64(25165), Level: 72207, Address: "KT1C2MfcjWb5R1ZDDxVULCsGuxrf5fEn5264", Network: types.Edo2net, Timestamp: timestamp, }, { Action: "remove", SourcePtr: setInt64(25164), Level: 72207, Address: "KT1C2MfcjWb5R1ZDDxVULCsGuxrf5fEn5264", Network: types.Edo2net, Timestamp: timestamp, }, { Action: "copy", SourcePtr: setInt64(25167), DestinationPtr: setInt64(25171), Level: 72207, Address: "KT1JgHoXtZPjVfG82BY3FSys2VJhKVZo2EJU", Network: types.Edo2net, Timestamp: timestamp, }, { Action: "copy", SourcePtr: setInt64(25166), DestinationPtr: setInt64(25170), Level: 72207, Address: "KT1JgHoXtZPjVfG82BY3FSys2VJhKVZo2EJU", Network: types.Edo2net, Timestamp: timestamp, }, { Action: "copy", SourcePtr: setInt64(25165), DestinationPtr: setInt64(25169), Level: 72207, Address: "KT1JgHoXtZPjVfG82BY3FSys2VJhKVZo2EJU", Network: types.Edo2net, Timestamp: timestamp, }, { Action: "copy", SourcePtr: setInt64(25164), DestinationPtr: setInt64(25168), Level: 72207, Address: "KT1JgHoXtZPjVfG82BY3FSys2VJhKVZo2EJU", Network: types.Edo2net, Timestamp: timestamp, }, }, Contracts: []*modelContract.Contract{ { Network: types.Edo2net, Level: 72207, Timestamp: timestamp, Language: "unknown", Hash: "d3bfdacb039f6e8added88c45046b7a8f6a2b91744859ace29f4c19294c9a394857598e2b331394cac91a7a2c543cadaa60282c5eb2c87f83f001f5e563cea36", Tags: []string{"ledger", "fa2"}, FailStrings: []string{"FA2_INSUFFICIENT_BALANCE"}, Annotations: []string{"%token_address", "%drop_proposal", "%transfer_contract_tokens", "%permits_counter", "%remove_operator", "%mint", "%ledger", "%voters", "%owner", "%balance", "%transfer", "%from_", "%max_voting_period", "%not_in_migration", "%start_date", "%custom_entrypoints", "%proposal_check", "%accept_ownership", "%migrate", "%set_quorum_threshold", "%amount", "%proposals", "%min_voting_period", "%rejected_proposal_return_value", "%burn", "%flush", "%max_quorum_threshold", "%migratingTo", "%operators", "%proposer", "%call_FA2", "%argument", "%params", "%transfer_ownership", "%voting_period", "%request", "%confirm_migration", "%frozen_token", "%param", "%admin", "%migration_status", "%proposal_key_list_sort_by_date", "%requests", "%update_operators", "%add_operator", "%getVotePermitCounter", "%propose", "%vote", "%vote_amount", "%proposer_frozen_token", "%callCustom", "%txs", "%operator", "%quorum_threshold", "%to_", "%set_voting_period", "%callback", "%contract_address", "%downvotes", "%max_votes", "%balance_of", "%proposal_key", "%vote_type", "%signature", "%decision_lambda", "%token_id", "%permit", "%key", "%extra", "%pending_owner", "%upvotes", "%max_proposals", "%min_quorum_threshold", "%proposal_metadata", "%metadata", "%migratedTo"}, Entrypoints: []string{"callCustom", "accept_ownership", "burn", "balance_of", "transfer", "update_operators", "confirm_migration", "drop_proposal", "flush", "getVotePermitCounter", "migrate", "mint", "propose", "set_quorum_threshold", "set_voting_period", "transfer_ownership", "vote", "transfer_contract_tokens"}, Address: "KT1JgHoXtZPjVfG82BY3FSys2VJhKVZo2EJU", Manager: "KT1C2MfcjWb5R1ZDDxVULCsGuxrf5fEn5264", }, }, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { for address, level := range tt.storage { rpc. EXPECT(). GetScriptStorageRaw(address, level). 
DoAndReturn( func(address string, level int64) ([]byte, error) { storageFile := fmt.Sprintf("./data/rpc/script/storage/%s_%d.json", address, level) return ioutil.ReadFile(storageFile) }, ). AnyTimes() } var op noderpc.OperationGroup if err := readJSONFile(tt.filename, &op); err != nil { t.Errorf(`readJSONFile("%s") = error %v`, tt.filename, err) return } parseParams, err := NewParseParams(tt.rpc, tt.ctx, tt.paramsOpts...) if err != nil { t.Errorf(`NewParseParams = error %v`, err) return } opg := NewGroup(parseParams) got, err := opg.Parse(op) if (err != nil) != tt.wantErr { t.Errorf("Group.Parse() error = %v, wantErr %v", err, tt.wantErr) return } if !compareParserResponse(t, got, tt.want) { t.Errorf("Group.Parse() = %#v, want %#v", got, tt.want) } }) } }
}, storage: map[string]int64{ "KT1S5iPRQ612wcNm6mXDqDhTNegGFcvTV7vM": 1068668, "KT19nHqEWZxFFbbDL1b7Y86escgEN7qUShGo": 1068668,
vnbitmex.py
# encoding: UTF-8 from __future__ import print_function import hashlib import hmac import json import ssl import traceback from queue import Queue, Empty from multiprocessing.dummy import Pool from time import time from urlparse import urlparse from copy import copy from urllib import urlencode from threading import Thread from six.moves import input import requests import websocket REST_HOST = 'https://www.bitmex.com/api/v1' WEBSOCKET_HOST = 'wss://www.bitmex.com/realtime' TESTNET_REST_HOST = 'https://testnet.bitmex.com/api/v1' TESTNET_WEBSOCKET_HOST = 'wss://testnet.bitmex.com/realtime' ######################################################################## class BitmexRestApi(object): """REST API""" #---------------------------------------------------------------------- def __init__(self): """Constructor""" self.apiKey = '' self.apiSecret = '' self.host = '' self.active = False self.reqid = 0 self.queue = Queue() self.pool = None self.sessionDict = {} # 会话对象字典 self.header = { 'Content-Type': 'application/x-www-form-urlencoded', 'Accept': 'application/json' } #---------------------------------------------------------------------- def init(self, apiKey, apiSecret, testnet=False): """初始化""" self.apiKey = apiKey self.apiSecret = apiSecret if testnet: self.host = TESTNET_REST_HOST else: self.host = REST_HOST #---------------------------------------------------------------------- def start(self, n=3): """启动""" if self.active: return self.active = True self.pool = Pool(n) self.pool.map_async(self.run, range(n)) #---------------------------------------------------------------------- def close(self): """关闭""" self.active = False if self.pool: self.pool.close() self.pool.join() #---------------------------------------------------------------------- def addReq(self, method, path, callback, params=None, postdict=None): """添加请求""" self.reqid += 1 req = (method, path, callback, params, postdict, self.reqid) self.queue.put(req) return self.reqid #---------------------------------------------------------------------- def processReq(self, req, i): """处理请求""" method, path, callback, params, postdict, reqid = req url = self.host + path expires = int(time() + 5) rq = requests.Request(url=url, data=postdict) p = rq.prepare() header = copy(self.header) header['api-expires'] = str(expires) header['api-key'] = self.apiKey header['api-signature'] = self.generateSignature(method, path, expires, params, body=p.body) # 使用长连接的session,比短连接的耗时缩短80% session = self.sessionDict[i] resp = session.request(method, url, headers=header, params=params, data=postdict) #resp = requests.request(method, url, headers=header, params=params, data=postdict) code = resp.status_code d = resp.json() if code == 200: callback(d, reqid) else: self.onError(code, d) #---------------------------------------------------------------------- def run(self, i): """连续运行""" self.sessionDict[i] = requests.Session() while self.active: try: req = self.queue.get(timeout=1) self.processReq(req, i) except Empty: pass #---------------------------------------------------------------------- def generateSignature(self, method, path, expires, params=None, body=None): """生成签名""" # 对params在HTTP报文路径中,以请求字段方式序列化 if params: query = urlencode(params.items()) path = path + '?' 
+ query if body is None: body = '' msg = method + '/api/v1' + path + str(expires) + body signature = hmac.new(self.apiSecret, msg, digestmod=hashlib.sha256).hexdigest() return signature #---------------------------------------------------------------------- def onError(self, code, error): """错误回调""" print('on error') print(code, error) #---------------------------------------------------------------------- def onData(self, data, reqid): """通用回调""" print('on data') print(data, reqid) ######################################################################## class BitmexWebsocketApi(object): """Websocket API""" #---------------------------------------------------------------------- def __init__(self): """Constructor""" self.ws = None self.thread = None self.active = False self.host = ''
#---------------------------------------------------------------------- def start(self, testnet=False): """启动""" if testnet: self.host = TESTNET_WEBSOCKET_HOST else: self.host = WEBSOCKET_HOST self.connectWs() self.active = True self.thread = Thread(target=self.run) self.thread.start() self.onConnect() #---------------------------------------------------------------------- def reconnect(self): """重连""" self.connectWs() self.onConnect() #---------------------------------------------------------------------- def connectWs(self): """""" self.ws = websocket.create_connection(self.host, sslopt={'cert_reqs': ssl.CERT_NONE}) #---------------------------------------------------------------------- def run(self): """运行""" while self.active: try: stream = self.ws.recv() data = json.loads(stream) self.onData(data) except: msg = traceback.format_exc() self.onError(msg) self.reconnect() #---------------------------------------------------------------------- def close(self): """关闭""" self.active = False if self.thread: self.thread.join() #---------------------------------------------------------------------- def onConnect(self): """连接回调""" print('connected') #---------------------------------------------------------------------- def onData(self, data): """数据回调""" print('-' * 30) l = data.keys() l.sort() for k in l: print(k, data[k]) #---------------------------------------------------------------------- def onError(self, msg): """错误回调""" print(msg) #---------------------------------------------------------------------- def sendReq(self, req): """发出请求""" self.ws.send(json.dumps(req)) if __name__ == '__main__': API_KEY = '' API_SECRET = '' ## REST测试 rest = BitmexRestApi() rest.init(API_KEY, API_SECRET) rest.start(3) data = { 'symbol': 'XBTUSD' } rest.addReq('POST', '/position/isolate', rest.onData, postdict=data) #rest.addReq('GET', '/instrument', rest.onData) # WEBSOCKET测试 #ws = BitmexWebsocketApi() #ws.start() #req = {"op": "subscribe", "args": ['order', 'trade', 'position', 'margin']} #ws.sendReq(req) #expires = int(time()) #method = 'GET' #path = '/realtime' #msg = method + path + str(expires) #signature = hmac.new(API_SECRET, msg, digestmod=hashlib.sha256).hexdigest() #req = { #'op': 'authKey', #'args': [API_KEY, expires, signature] #} #ws.sendReq(req) #req = {"op": "subscribe", "args": ['order', 'execution', 'position', 'margin']} #req = {"op": "subscribe", "args": ['instrument']} #ws.sendReq(req) input()
xpath.py
newchat_xpath = "//*[@id='side']/header/div[2]/div/span/div[2]"
search_xpath = "//*[@id='app']/div/div/div[2]/div[1]/span/div/span/div/div[1]/div/label/div/div[2]"
sendbutton_xpath = "//*[@id='main']/footer/div[1]/div[3]"
# user_xpath = "//span[@title='{}']"
message_xpath = "//*[@id='main']/footer/div[1]/div[2]/div/div[2]"
unit_of_work.py
from __future__ import annotations

import abc

from alocacao.adapters import repository
from alocacao.config import DEFAULT_SESSION_FACTORY


class AbstractUOW(abc.ABC):
    produtos: repository.AbstractRepository

    def __enter__(self) -> AbstractUOW:
        return self

    def __exit__(self, *args):
        self.rollback()

    def commit(self):
        self._commit()

    def collect_new_messages(self):
        for produto in self.produtos.seen:
            while produto.eventos:
                yield produto.eventos.pop(0)

    @abc.abstractmethod
    def _commit(self):
        pass

    @abc.abstractmethod
    def rollback(self):
        pass


class SQLAlchemyUOW(AbstractUOW):
    def __init__(self, session_factory=DEFAULT_SESSION_FACTORY):
        self.session_factory = session_factory

    def __enter__(self):
        self.session = self.session_factory()
        self.produtos = repository.TrackingRepository(
            repository.SQLAlchemyRepository(self.session)
        )
        return super().__enter__()

    def __exit__(self, *args):
    def _commit(self):
        self.session.commit()

    def rollback(self):
        self.session.rollback()
        super().__exit__(*args)
        self.session.close()
utils.ts
/**
 * Used to create an escaped regex for the non-SharePoint request tests
 *
 */
export function
(s: string): RegExp {
    let s2 = s.replace(/\(/g, "\\(");
    s2 = s2.replace(/\)/g, "\\)");
    s2 = s2.replace(/\?/g, "\\?");
    s2 = s2.replace(/\$/g, "\\$");
    return new RegExp(`${s2}$`, "i");
}
toMatchEndRegex
prompt_test.go
package prompt_test import ( "bytes" "fmt" "testing" "github.com/boson-project/faas/prompt" ) // TestForStringLabel ensures that a string prompt with a given label is printed to stdout. func TestForStringLabel(t *testing.T) { var out bytes.Buffer var in bytes.Buffer in.Write([]byte("\n")) // Empty label _ = prompt.ForString("", "", prompt.WithInput(&in), prompt.WithOutput(&out)) if out.String() != ": " { t.Fatalf("expected output to be ': ', got '%v'\n", out.String()) } out.Reset() in.Reset() in.Write([]byte("\n")) // Populated lable _ = prompt.ForString("Name", "", prompt.WithInput(&in), prompt.WithOutput(&out)) if out.String() != "Name: " { t.Fatalf("expected 'Name', got '%v'\n", out.String()) } } // TestForStringLabelDefault ensures that a default, only if provided, is appended // to the prompt label. func TestForStringLabelDefault(t *testing.T) { var out bytes.Buffer var in bytes.Buffer in.Write([]byte("\n")) // [ENTER] // No lablel but a default _ = prompt.ForString("", "Alice", prompt.WithInput(&in), prompt.WithOutput(&out)) if out.String() != "(Alice): " { t.Fatalf("expected '(Alice): ', got '%v'\n", out.String()) } out.Reset() in.Reset() in.Write([]byte("\n")) // [ENTER] // Label with default _ = prompt.ForString("Name", "Alice", prompt.WithInput(&in), prompt.WithOutput(&out)) if out.String() != "Name (Alice): " { t.Fatalf("expected 'Name (Alice): ', got '%v'\n", out.String()) } } // TestForStringLabelDelimiter ensures that a default delimiter override is respected. func TestWithDelimiter(t *testing.T) { var out bytes.Buffer var in bytes.Buffer in.Write([]byte("\n")) // [ENTER] _ = prompt.ForString("", "", prompt.WithInput(&in), prompt.WithOutput(&out), prompt.WithDelimiter("Δ")) if out.String() != "Δ" { t.Fatalf("expected output to be 'Δ', got '%v'\n", out.String()) } } // TestForStringDefault ensures that the default is returned when enter is // pressed on a string input. func TestForStringDefault(t *testing.T) { var out bytes.Buffer var in bytes.Buffer in.Write([]byte("\n")) // [ENTER] // Empty default should return an empty value. s := prompt.ForString("", "", prompt.WithInput(&in), prompt.WithOutput(&out)) if s != "" { t.Fatalf("expected '', got '%v'\n", s) } in.Reset() out.Reset() in.Write([]byte("\n")) // [ENTER] // Extant default should be returned s = prompt.ForString("", "default", prompt.WithInput(&in), prompt.WithOutput(&out)) if s != "default" { t.Fatalf("expected 'default', got '%v'\n", s) } } // TestForStringRequired ensures that an error is generated if a value is not // provided for a required prompt with no default. func TestForStringRequired(t *testing.T) { var out bytes.Buffer var in bytes.Buffer in.Write([]byte("\n")) // [ENTER] _ = prompt.ForString("", "", prompt.WithInput(&in), prompt.WithOutput(&out), prompt.WithRequired(true), prompt.WithRetryLimit(1)) // makes the output buffer easier to confirm output := out.String() expected := ": \nplease enter a value\n: " if output != expected { t.Fatalf("Unexpected prompt received for a required value. expected '%v', got '%v'", expected, output) } } // TestForString ensures that string input is accepted. func TestForString(t *testing.T) {
/ TestForBoolLabel ensures that a prompt for a given boolean prompt prints // the expected y/n prompt. func TestForBoolLabel(t *testing.T) { var out bytes.Buffer var in bytes.Buffer in.Write([]byte("\n")) // Empty label, default false _ = prompt.ForBool("", false, prompt.WithInput(&in), prompt.WithOutput(&out)) if out.String() != "(y/N): " { t.Fatalf("expected output to be '(y/N): ', got '%v'\n", out.String()) } out.Reset() in.Reset() in.Write([]byte("\n")) // Empty label, default true _ = prompt.ForBool("", true, prompt.WithInput(&in), prompt.WithOutput(&out)) if out.String() != "(Y/n): " { t.Fatalf("expected output to be '(Y/n): ', got '%v'\n", out.String()) } out.Reset() in.Reset() in.Write([]byte("\n")) // Populated lablel default false _ = prompt.ForBool("Local", false, prompt.WithInput(&in), prompt.WithOutput(&out)) if out.String() != "Local (y/N): " { t.Fatalf("expected 'Local (y/N): ', got '%v'\n", out.String()) } } // TestForBoolDefault ensures that the default is returned when no user input is given. func TestForBoolDefault(t *testing.T) { var out bytes.Buffer var in bytes.Buffer in.Write([]byte("\n")) b := prompt.ForBool("", false, prompt.WithInput(&in), prompt.WithOutput(&out)) if b != false { t.Fatal("expected default of false to be returned when user accepts.") } out.Reset() in.Reset() in.Write([]byte("\n")) b = prompt.ForBool("", true, prompt.WithInput(&in), prompt.WithOutput(&out)) if b != true { t.Fatal("expected default of true to be returned when user accepts.") } } // TestForBool ensures that a truthy value, when entered, is returned as a bool. func TestForBool(t *testing.T) { var out bytes.Buffer var in bytes.Buffer cases := []struct { in string out bool }{ {"true", true}, {"1", true}, {"y", true}, {"Y", true}, {"yes", true}, {"Yes", true}, {"YES", true}, {"false", false}, {"0", false}, {"n", false}, {"N", false}, {"no", false}, {"No", false}, {"NO", false}, } for _, c := range cases { in.Reset() out.Reset() fmt.Fprintf(&in, "%v\n", c.in) // Note the default value is always the oposite of the input // to ensure it is flipped. b := prompt.ForBool("", !c.out, prompt.WithInput(&in), prompt.WithOutput(&out)) if b != c.out { t.Fatalf("expected '%v' to be an acceptable %v.", c.in, c.out) } } }
var in bytes.Buffer var out bytes.Buffer in.Write([]byte("hunter2\n")) s := prompt.ForString("", "", prompt.WithInput(&in), prompt.WithOutput(&out)) if s != "hunter2" { t.Fatalf("Expected 'hunter2' got '%v'", s) } } /
UserSearch.tsx
import { useState, useRef, useEffect } from "react";

const users = [
    { name: 'Sarah', age: 20 },
    { name: 'Alex', age: 20 },
    { name: 'Michael', age: 20 }
];

const UserSearch: React.FC = () => {
    const inputRef = useRef<HTMLInputElement | null>(null);
    // const inputRef = useRef<any>(null);
    const [name, setName] = useState('');
    const [user, setUser] = useState<{ name: string, age: number } | undefined>();

    useEffect(() => {
        if (!inputRef.current) {
            return;
        }
        inputRef.current.focus();
    }, []);

    const onClick = () => {
        const foundUser = users.find((user) => {
            return user.name === name;
        });
        setUser(foundUser)
    };

    return (
        <div>
            User Search
            <input ref={inputRef} value={name} onChange={e => setName(e.target.value)} />
            <button onClick={onClick}>Find User</button>
            <div>
                {user && user.name}
                {user && user.age}
            </div>
        </div>
    );
};

export default UserSearch;
mod.rs
// Copyright Materialize, Inc. and contributors. All rights reserved. // // Use of this software is governed by the Business Source License // included in the LICENSE file. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0. pub(crate) mod catalog; pub(crate) mod util; use crate::plan::query::QueryLifetime; use crate::plan::StatementContext; use mz_expr_test_util::generate_explanation; use mz_lowertest::*; use crate::query_model::Model; use catalog::TestCatalog; use lazy_static::lazy_static; use serde::{Deserialize, Serialize}; use std::collections::HashMap; /// Tests to run on a Query Graph Model. #[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, MzReflect)] enum Directive { // TODO: support build apply=(<stuff>) /// Apply any number of specific changes to the model. Build, /// Decorrelate the model and convert it to a `MirRelationExpr`. Lower, /// Optimize the model. Opt, /// Optimize and decorrelate the model. Then convert it to a `MirRelationExpr`. EndToEnd, } lazy_static! { pub static ref RTI: ReflectedTypeInfo = { let mut rti = ReflectedTypeInfo::default(); Directive::add_to_reflected_type_info(&mut rti); rti }; } /// Convert the input string to a Query Graph Model. fn convert_input_to_model(input: &str, catalog: &TestCatalog) -> Result<Model, String> { // TODO (#9347): Support parsing specs for HirRelationExpr. // TODO (#10518): Support parsing specs for QGM. // match parse_input_as_qgm(input) { // Ok(model) => Ok(model), // Err(err) => { // let hir = match parse_input_as_hir(input) { // Ok(hir) => hir, // Err(err2) => // } // Model::from(hir) // } // } match mz_sql_parser::parser::parse_statements(input) { Ok(mut stmts) => { assert!(stmts.len() == 1); let stmt = stmts.pop().unwrap(); let scx = &StatementContext::new(None, catalog); if let mz_sql_parser::ast::Statement::Select(query) = stmt { let planned_query = match crate::plan::query::plan_root_query( scx, query.query, QueryLifetime::Static, ) { Ok(planned_query) => planned_query, Err(e) => return Err(format!("unable to plan query: {}: {}", input, e)), }; Model::try_from(planned_query.expr).map_err(|e| e.into()) } else { Err(format!("invalid query: {}", input)) } } Err(e) => { // TODO: try to parse the input as a spec for an HIR. // If that fails, try to parse the input as a spec for a QGM. // Change this error message. Err(format!("unable to parse SQL: {}: {}", input, e)) } } } fn run_command( command: &str, input: &str, args: &HashMap<String, Vec<String>>, catalog: &TestCatalog, ) -> Result<String, String>
#[test]
fn test_qgm() {
    datadriven::walk("tests/querymodel", |f| {
        let mut catalog = TestCatalog::default();
        f.run(move |s| -> String {
            match s.directive.as_str() {
                "cat" => match catalog.execute_commands(&s.input) {
                    Ok(ok) => ok,
                    Err(err) => err,
                },
                other => match run_command(other, &s.input, &s.args, &catalog) {
                    Ok(ok) => ok,
                    Err(err) => err,
                },
            }
        })
    });
}
{ let mut model = convert_input_to_model(input, catalog)?; let directive: Directive = deserialize( &mut tokenize(command)?.into_iter(), "Directive", &RTI, &mut GenericTestDeserializeContext::default(), )?; if matches!(directive, Directive::Opt | Directive::EndToEnd) { model.optimize(); } // TODO: allow printing multiple stages of the transformation of the query. if matches!(directive, Directive::Lower | Directive::EndToEnd) { match model.try_into() { Ok(mir) => Ok(generate_explanation(catalog, &mir, args.get("format"))), Err(err) => Err(err.to_string()), } } else { match model.as_dot(input, catalog, false) { Ok(graph) => Ok(graph), Err(e) => return Err(format!("graph generation error: {}", e)), } } }
main.go
// Copyright © 2017 Aidan Steele <[email protected]> // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import "github.com/glassechidna/faketags/cmd"
func main() { cmd.Execute() }
index.js
#!/usr/bin/env node const boltComplete = require('./complete');
boltComplete.start();
get_catalog_private_endpoint_request_response.go
// Copyright (c) 2016, 2018, 2021, Oracle and/or its affiliates. All rights reserved. // This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. // Code generated. DO NOT EDIT. package datacatalog import ( "github.com/oracle/oci-go-sdk/v49/common" "net/http" ) // GetCatalogPrivateEndpointRequest wrapper for the GetCatalogPrivateEndpoint operation // // See also // // Click https://docs.cloud.oracle.com/en-us/iaas/tools/go-sdk-examples/latest/datacatalog/GetCatalogPrivateEndpoint.go.html to see an example of how to use GetCatalogPrivateEndpointRequest. type GetCatalogPrivateEndpointRequest struct { // Unique private reverse connection identifier. CatalogPrivateEndpointId *string `mandatory:"true" contributesTo:"path" name:"catalogPrivateEndpointId"` // The client request ID for tracing. OpcRequestId *string `mandatory:"false" contributesTo:"header" name:"opc-request-id"` // Metadata about the request. This information will not be transmitted to the service, but // represents information that the SDK will consume to drive retry behavior. RequestMetadata common.RequestMetadata } func (request GetCatalogPrivateEndpointRequest) String() string { return common.PointerString(request) } // HTTPRequest implements the OCIRequest interface func (request GetCatalogPrivateEndpointRequest) HTTPRequest(method, path string, binaryRequestBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (http.Request, error) { return common.MakeDefaultHTTPRequestWithTaggedStructAndExtraHeaders(method, path, request, extraHeaders) } // BinaryRequestBody implements the OCIRequest interface func (request GetCatalogPrivateEndpointRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool) { return nil, false } // RetryPolicy implements the OCIRetryableRequest interface. This retrieves the specified retry policy. func (request GetCatalogPrivateEndpointRequest) RetryPolicy() *common.RetryPolicy { return request.RequestMetadata.RetryPolicy } // GetCatalogPrivateEndpointResponse wrapper for the GetCatalogPrivateEndpoint operation
// The CatalogPrivateEndpoint instance CatalogPrivateEndpoint `presentIn:"body"` // Unique Oracle-assigned identifier for the request. If you need to contact // Oracle about a particular request, please provide the request ID. OpcRequestId *string `presentIn:"header" name:"opc-request-id"` // For optimistic concurrency control. See ETags for Optimistic Concurrency Control (https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#eleven). Etag *string `presentIn:"header" name:"etag"` } func (response GetCatalogPrivateEndpointResponse) String() string { return common.PointerString(response) } // HTTPResponse implements the OCIResponse interface func (response GetCatalogPrivateEndpointResponse) HTTPResponse() *http.Response { return response.RawResponse }
type GetCatalogPrivateEndpointResponse struct { // The underlying http response RawResponse *http.Response
substring.rs
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. //! Defines kernel to extract a substring of a \[Large\]StringArray use crate::{array::*, buffer::Buffer, datatypes::ToByteSlice}; use crate::{ datatypes::DataType, error::{ArrowError, Result}, }; use std::sync::Arc; fn generic_substring<OffsetSize: StringOffsetSizeTrait>( array: &GenericStringArray<OffsetSize>, start: OffsetSize, length: &Option<OffsetSize>, ) -> Result<ArrayRef> { // compute current offsets let offsets = array.data_ref().clone().buffers()[0].clone(); let offsets: &[OffsetSize] = unsafe { offsets.typed_data::<OffsetSize>() }; // compute null bitmap (copy) let null_bit_buffer = array.data_ref().null_buffer().cloned(); // compute values let values = &array.data_ref().buffers()[1]; let data = values.data(); let mut new_values = Vec::new(); // we have no way to estimate how much this will be. let mut new_offsets: Vec<OffsetSize> = Vec::with_capacity(array.len() + 1); let mut length_so_far = OffsetSize::zero(); new_offsets.push(length_so_far); (0..array.len()).for_each(|i| { // the length of this entry let lenght_i: OffsetSize = offsets[i + 1] - offsets[i]; // compute where we should start slicing this entry let start = offsets[i] + if start >= OffsetSize::zero() { start } else { lenght_i + start }; let start = start.max(offsets[i]).min(offsets[i + 1]); // compute the lenght of the slice let length: OffsetSize = length .unwrap_or(lenght_i) // .max(0) is not needed as it is guaranteed .min(offsets[i + 1] - start); // so we do not go beyond this entry length_so_far = length_so_far + length; new_offsets.push(length_so_far); // we need usize for ranges let start = start.to_usize().unwrap(); let length = length.to_usize().unwrap(); new_values.extend_from_slice(&data[start..start + length]); }); let data = ArrayData::new( <OffsetSize as StringOffsetSizeTrait>::DATA_TYPE, array.len(), None, null_bit_buffer, 0, vec![ Buffer::from(new_offsets.to_byte_slice()), Buffer::from(&new_values[..]), ], vec![], ); Ok(make_array(Arc::new(data))) } /// Returns an ArrayRef with a substring starting from `start` and with optional length `length` of each of the elements in `array`. /// `start` can be negative, in which case the start counts from the end of the string. /// this function errors when the passed array is not a \[Large\]String array. 
pub fn substring(array: &Array, start: i64, length: &Option<u64>) -> Result<ArrayRef> { match array.data_type() { DataType::LargeUtf8 => generic_substring( array .as_any() .downcast_ref::<LargeStringArray>() .expect("A large string is expected"), start, &length.map(|e| e as i64), ), DataType::Utf8 => generic_substring( array .as_any() .downcast_ref::<StringArray>() .expect("A string is expected"), start as i32, &length.map(|e| e as i32), ), _ => Err(ArrowError::ComputeError(format!( "substring does not support type {:?}", array.data_type() ))), } } #[cfg(test)] mod tests { use super::*; fn with_nulls<T: 'static + Array + PartialEq + From<Vec<Option<&'static str>>>>( ) -> Result<()> { let cases = vec![ // identity ( vec![Some("hello"), None, Some("word")], 0, None, vec![Some("hello"), None, Some("word")], ), // 0 length -> Nothing ( vec![Some("hello"), None, Some("word")], 0, Some(0), vec![Some(""), None, Some("")], ), // high start -> Nothing ( vec![Some("hello"), None, Some("word")], 1000, Some(0), vec![Some(""), None, Some("")], ), // high negative start -> identity ( vec![Some("hello"), None, Some("word")], -1000, None, vec![Some("hello"), None, Some("word")], ), // high length -> identity ( vec![Some("hello"), None, Some("word")], 0, Some(1000), vec![Some("hello"), None, Some("word")], ), ]; cases.into_iter().try_for_each::<_, Result<()>>( |(array, start, length, expected)| { let array = T::from(array); let result: ArrayRef = substring(&array, start, &length)?; assert_eq!(array.len(), result.len()); let result = result.as_any().downcast_ref::<T>().unwrap(); let expected = T::from(expected); assert_eq!(&expected, result); Ok(()) }, )?; Ok(()) } #[test] fn with_nulls_string() -> Result<()> { with_nulls::<StringArray>() } #[test] fn with_nulls_large_string() -> Result<()> { with_nulls::<LargeStringArray>() } fn without_nulls<T: 'static + Array + PartialEq + From<Vec<Option<&'static str>>>>( ) -> Result<()>
#[test] fn without_nulls_string() -> Result<()> { without_nulls::<StringArray>() } #[test] fn without_nulls_large_string() -> Result<()> { without_nulls::<LargeStringArray>() } }
{ let cases = vec![ // increase start ( vec!["hello", "", "word"], 0, None, vec!["hello", "", "word"], ), (vec!["hello", "", "word"], 1, None, vec!["ello", "", "ord"]), (vec!["hello", "", "word"], 2, None, vec!["llo", "", "rd"]), (vec!["hello", "", "word"], 3, None, vec!["lo", "", "d"]), (vec!["hello", "", "word"], 10, None, vec!["", "", ""]), // increase start negatively (vec!["hello", "", "word"], -1, None, vec!["o", "", "d"]), (vec!["hello", "", "word"], -2, None, vec!["lo", "", "rd"]), (vec!["hello", "", "word"], -3, None, vec!["llo", "", "ord"]), ( vec!["hello", "", "word"], -10, None, vec!["hello", "", "word"], ), // increase length (vec!["hello", "", "word"], 1, Some(1), vec!["e", "", "o"]), (vec!["hello", "", "word"], 1, Some(2), vec!["el", "", "or"]), ( vec!["hello", "", "word"], 1, Some(3), vec!["ell", "", "ord"], ), ( vec!["hello", "", "word"], 1, Some(4), vec!["ello", "", "ord"], ), (vec!["hello", "", "word"], -3, Some(1), vec!["l", "", "o"]), (vec!["hello", "", "word"], -3, Some(2), vec!["ll", "", "or"]), ( vec!["hello", "", "word"], -3, Some(3), vec!["llo", "", "ord"], ), ( vec!["hello", "", "word"], -3, Some(4), vec!["llo", "", "ord"], ), ]; cases.into_iter().try_for_each::<_, Result<()>>( |(array, start, length, expected)| { let array = StringArray::from(array); let result = substring(&array, start, &length)?; assert_eq!(array.len(), result.len()); let result = result.as_any().downcast_ref::<StringArray>().unwrap(); let expected = StringArray::from(expected); assert_eq!(&expected, result,); Ok(()) }, )?; Ok(()) }
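A minimal usage sketch of the substring kernel defined above; the arrow import paths are an assumption and may differ between releases, and start = 1 with length = 3 mirrors one of the in-file test cases:

```rust
// Import path assumed; in-tree this kernel lives in the compute::kernels::substring module.
use arrow::array::{Array, StringArray};
use arrow::compute::kernels::substring::substring;

fn main() -> arrow::error::Result<()> {
    let input = StringArray::from(vec![Some("hello"), None, Some("word")]);
    // start = 1, length = 3: "hello" -> "ell", "word" -> "ord", nulls stay null
    let sliced = substring(&input, 1, &Some(3))?;
    let sliced = sliced.as_any().downcast_ref::<StringArray>().unwrap();
    assert_eq!(sliced.value(0), "ell");
    assert!(sliced.is_null(1));
    assert_eq!(sliced.value(2), "ord");
    Ok(())
}
```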
lib.rs
pub mod transfer_types; use crate::transfer_types::{ ExternallyAllocatedByteArr, ExternallyAllocatedStr, RustAllocatedString, RustBox, StaticRustAllocatedString, }; use fnv::FnvHasher; use std::{ hash::Hasher, io::{self, Write}, net::IpAddr, }; use wirefilter::{ExecutionContext, Filter, FilterAst, ParseError, Scheme, Type}; const VERSION: &str = env!("CARGO_PKG_VERSION"); #[repr(u8)] pub enum ParsingResult<'s> { Err(RustAllocatedString), Ok(RustBox<FilterAst<'s>>), } impl<'s> From<FilterAst<'s>> for ParsingResult<'s> { fn from(filter_ast: FilterAst<'s>) -> Self { ParsingResult::Ok(filter_ast.into()) } } impl<'s, 'a> From<ParseError<'a>> for ParsingResult<'s> { fn from(err: ParseError<'a>) -> Self { ParsingResult::Err(RustAllocatedString::from(err.to_string())) } } impl<'s> ParsingResult<'s> { pub fn unwrap(self) -> RustBox<FilterAst<'s>> { match self { ParsingResult::Err(err) => panic!("{}", &err as &str), ParsingResult::Ok(filter) => filter, } } } #[no_mangle] pub extern "C" fn wirefilter_create_scheme() -> RustBox<Scheme> { Default::default() } #[no_mangle] pub extern "C" fn wirefilter_free_scheme(scheme: RustBox<Scheme>)
#[no_mangle] pub extern "C" fn wirefilter_add_type_field_to_scheme( scheme: &mut Scheme, name: ExternallyAllocatedStr<'_>, ty: Type, ) { scheme.add_field(name.into_ref().to_owned(), ty).unwrap(); } #[no_mangle] pub extern "C" fn wirefilter_free_parsed_filter(filter_ast: RustBox<FilterAst<'_>>) { drop(filter_ast); } #[no_mangle] pub extern "C" fn wirefilter_free_string(s: RustAllocatedString) { drop(s); } #[no_mangle] pub extern "C" fn wirefilter_parse_filter<'s, 'i>( scheme: &'s Scheme, input: ExternallyAllocatedStr<'i>, ) -> ParsingResult<'s> { match scheme.parse(input.into_ref()) { Ok(filter) => ParsingResult::from(filter), Err(err) => ParsingResult::from(err), } } #[no_mangle] pub extern "C" fn wirefilter_free_parsing_result(r: ParsingResult<'_>) { drop(r); } /// Wrapper for Hasher that allows using Write API (e.g. with serializer). #[derive(Default)] struct HasherWrite<H: Hasher>(H); impl<H: Hasher> Write for HasherWrite<H> { fn write_all(&mut self, buf: &[u8]) -> io::Result<()> { self.0.write(buf); Ok(()) } fn write(&mut self, buf: &[u8]) -> io::Result<usize> { self.write_all(buf)?; Ok(buf.len()) } fn flush(&mut self) -> io::Result<()> { Ok(()) } } fn unwrap_json_result<T>(filter_ast: &FilterAst<'_>, result: serde_json::Result<T>) -> T { // Filter serialisation must never fail. result.unwrap_or_else(|err| panic!("{} while serializing filter {:#?}", err, filter_ast)) } #[no_mangle] pub extern "C" fn wirefilter_get_filter_hash(filter_ast: &FilterAst<'_>) -> u64 { let mut hasher = FnvHasher::default(); // Serialize JSON to our Write-compatible wrapper around FnvHasher, // effectively calculating a hash for our filter in a streaming fashion // that is as stable as the JSON representation itself // (instead of relying on #[derive(Hash)] which would be tied to impl details). 
let result = serde_json::to_writer(HasherWrite(&mut hasher), filter_ast); unwrap_json_result(filter_ast, result); hasher.finish() } #[no_mangle] pub extern "C" fn wirefilter_serialize_filter_to_json( filter_ast: &FilterAst<'_>, ) -> RustAllocatedString { let result = serde_json::to_string(filter_ast); unwrap_json_result(filter_ast, result).into() } #[no_mangle] pub extern "C" fn wirefilter_create_execution_context<'e, 's: 'e>( scheme: &'s Scheme, ) -> RustBox<ExecutionContext<'e>> { ExecutionContext::new(scheme).into() } #[no_mangle] pub extern "C" fn wirefilter_free_execution_context(exec_context: RustBox<ExecutionContext<'_>>) { drop(exec_context); } #[no_mangle] pub extern "C" fn wirefilter_add_int_value_to_execution_context<'a>( exec_context: &mut ExecutionContext<'a>, name: ExternallyAllocatedStr<'_>, value: i32, ) { exec_context .set_field_value(name.into_ref(), value) .unwrap(); } #[no_mangle] pub extern "C" fn wirefilter_add_bytes_value_to_execution_context<'a>( exec_context: &mut ExecutionContext<'a>, name: ExternallyAllocatedStr<'_>, value: ExternallyAllocatedByteArr<'a>, ) { let slice: &[u8] = value.into_ref(); exec_context .set_field_value(name.into_ref(), slice) .unwrap(); } #[no_mangle] pub extern "C" fn wirefilter_add_ipv6_value_to_execution_context( exec_context: &mut ExecutionContext<'_>, name: ExternallyAllocatedStr<'_>, value: &[u8; 16], ) { exec_context .set_field_value(name.into_ref(), IpAddr::from(*value)) .unwrap(); } #[no_mangle] pub extern "C" fn wirefilter_add_ipv4_value_to_execution_context( exec_context: &mut ExecutionContext<'_>, name: ExternallyAllocatedStr<'_>, value: &[u8; 4], ) { exec_context .set_field_value(name.into_ref(), IpAddr::from(*value)) .unwrap(); } #[no_mangle] pub extern "C" fn wirefilter_add_bool_value_to_execution_context( exec_context: &mut ExecutionContext<'_>, name: ExternallyAllocatedStr<'_>, value: bool, ) { exec_context .set_field_value(name.into_ref(), value) .unwrap(); } #[no_mangle] pub extern "C" fn wirefilter_compile_filter<'s>( filter_ast: RustBox<FilterAst<'s>>, ) -> RustBox<Filter<'s>> { let filter_ast = filter_ast.into_real_box(); filter_ast.compile().into() } #[no_mangle] pub extern "C" fn wirefilter_match<'s>( filter: &Filter<'s>, exec_context: &ExecutionContext<'s>, ) -> bool { filter.execute(exec_context).unwrap().unwrap_or(false) } #[no_mangle] pub extern "C" fn wirefilter_free_compiled_filter(filter: RustBox<Filter<'_>>) { drop(filter); } #[no_mangle] pub extern "C" fn wirefilter_filter_uses( filter_ast: &FilterAst<'_>, field_name: ExternallyAllocatedStr<'_>, ) -> bool { filter_ast.uses(field_name.into_ref()).unwrap() } #[no_mangle] pub extern "C" fn wirefilter_get_version() -> StaticRustAllocatedString { StaticRustAllocatedString::from(VERSION) } #[cfg(test)] mod ffi_test { use super::*; use regex::Regex; fn create_scheme() -> RustBox<Scheme> { let mut scheme = wirefilter_create_scheme(); wirefilter_add_type_field_to_scheme( &mut scheme, ExternallyAllocatedStr::from("ip1"), Type::Ip, ); wirefilter_add_type_field_to_scheme( &mut scheme, ExternallyAllocatedStr::from("ip2"), Type::Ip, ); wirefilter_add_type_field_to_scheme( &mut scheme, ExternallyAllocatedStr::from("str1"), Type::Bytes, ); wirefilter_add_type_field_to_scheme( &mut scheme, ExternallyAllocatedStr::from("str2"), Type::Bytes, ); wirefilter_add_type_field_to_scheme( &mut scheme, ExternallyAllocatedStr::from("num1"), Type::Int, ); wirefilter_add_type_field_to_scheme( &mut scheme, ExternallyAllocatedStr::from("num2"), Type::Int, ); scheme } fn 
create_execution_context<'e, 's: 'e>(scheme: &'s Scheme) -> RustBox<ExecutionContext<'e>> { let mut exec_context = wirefilter_create_execution_context(scheme); wirefilter_add_ipv4_value_to_execution_context( &mut exec_context, ExternallyAllocatedStr::from("ip1"), &[127, 0, 0, 1], ); wirefilter_add_ipv6_value_to_execution_context( &mut exec_context, ExternallyAllocatedStr::from("ip2"), b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF\xC0\xA8\x00\x01", ); wirefilter_add_bytes_value_to_execution_context( &mut exec_context, ExternallyAllocatedStr::from("str1"), ExternallyAllocatedByteArr::from("Hey"), ); wirefilter_add_bytes_value_to_execution_context( &mut exec_context, ExternallyAllocatedStr::from("str2"), ExternallyAllocatedByteArr::from("yo123"), ); wirefilter_add_int_value_to_execution_context( &mut exec_context, ExternallyAllocatedStr::from("num1"), 42, ); wirefilter_add_int_value_to_execution_context( &mut exec_context, ExternallyAllocatedStr::from("num2"), 1337, ); exec_context } fn parse_filter<'s>(scheme: &'s Scheme, input: &'static str) -> ParsingResult<'s> { wirefilter_parse_filter(scheme, ExternallyAllocatedStr::from(input)) } fn match_filter( input: &'static str, scheme: &Scheme, exec_context: &ExecutionContext<'_>, ) -> bool { let filter = parse_filter(scheme, input).unwrap(); let filter = wirefilter_compile_filter(filter); let result = wirefilter_match(&filter, exec_context); wirefilter_free_compiled_filter(filter); result } #[test] fn parse_error() { use indoc::indoc; let src = indoc!( r#" ( num1 == 42 or num1 == "abc" ) "# ); let scheme = create_scheme(); { let result = parse_filter(&scheme, src); match result { ParsingResult::Ok(_) => panic!("Error expected"), ParsingResult::Err(err) => { assert_eq!( &err as &str, indoc!( r#" Filter parsing error (4:13): num1 == "abc" ^^^^^ expected digit "# ) ); wirefilter_free_string(err); } } } wirefilter_free_scheme(scheme); } #[test] fn filter_parsing() { let scheme = create_scheme(); { let filter = parse_filter(&scheme, r#"num1 > 3 && str2 == "abc""#).unwrap(); let json = wirefilter_serialize_filter_to_json(&filter); assert_eq!( &json as &str, r#"{"op":"And","items":[{"lhs":"num1","op":"GreaterThan","rhs":3},{"lhs":"str2","op":"Equal","rhs":"abc"}]}"# ); wirefilter_free_string(json); wirefilter_free_parsed_filter(filter); } wirefilter_free_scheme(scheme); } #[test] fn filter_matching() { let scheme = create_scheme(); { let exec_context = create_execution_context(&scheme); assert!(match_filter( r#"num1 > 41 && num2 == 1337 && ip1 != 192.168.0.1 && str2 ~ "yo\d+""#, &scheme, &exec_context )); assert!(match_filter( r#"ip2 == 0:0:0:0:0:ffff:c0a8:1 && (str1 == "Hey" || str2 == "ya")"#, &scheme, &exec_context )); assert!(!match_filter( "ip1 == 127.0.0.1 && ip2 == 0:0:0:0:0:ffff:c0a8:2", &scheme, &exec_context )); wirefilter_free_execution_context(exec_context); } wirefilter_free_scheme(scheme); } #[test] fn filter_hash() { let scheme = create_scheme(); { let filter1 = parse_filter( &scheme, r#"num1 > 41 && num2 == 1337 && ip1 != 192.168.0.1 && str2 ~ "yo\d+""#, ) .unwrap(); let filter2 = parse_filter( &scheme, r#"num1 > 41 && num2 == 1337 && ip1 != 192.168.0.1 and str2 ~ "yo\d+""#, ) .unwrap(); let filter3 = parse_filter(&scheme, r#"num1 > 41 && num2 == 1337"#).unwrap(); let hash1 = wirefilter_get_filter_hash(&filter1); let hash2 = wirefilter_get_filter_hash(&filter2); let hash3 = wirefilter_get_filter_hash(&filter3); assert_eq!(hash1, hash2); assert_ne!(hash2, hash3); wirefilter_free_parsed_filter(filter1); 
wirefilter_free_parsed_filter(filter2); wirefilter_free_parsed_filter(filter3); } wirefilter_free_scheme(scheme); } #[test] fn get_version() { let version = wirefilter_get_version(); let re = Regex::new(r"(?-u)^\d+\.\d+\.\d+$").unwrap(); assert!(re.is_match(version.into_ref())); } #[test] fn filter_uses() { let scheme = create_scheme(); { let filter = parse_filter( &scheme, r#"num1 > 41 && num2 == 1337 && ip1 != 192.168.0.1 && str2 ~ "yo\d+""#, ) .unwrap(); assert!(wirefilter_filter_uses( &filter, ExternallyAllocatedStr::from("num1") )); assert!(wirefilter_filter_uses( &filter, ExternallyAllocatedStr::from("ip1") )); assert!(wirefilter_filter_uses( &filter, ExternallyAllocatedStr::from("str2") )); assert!(!wirefilter_filter_uses( &filter, ExternallyAllocatedStr::from("str1") )); assert!(!wirefilter_filter_uses( &filter, ExternallyAllocatedStr::from("ip2") )); wirefilter_free_parsed_filter(filter); } wirefilter_free_scheme(scheme); } }
{ drop(scheme); }
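The HasherWrite wrapper above streams the JSON serialization of a filter directly into the hasher rather than hashing an intermediate string; a standalone sketch of the same pattern using std's DefaultHasher (serde and serde_json are assumed as dependencies, and the Expr struct is purely illustrative):

```rust
use serde::Serialize;
use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher;
use std::io::{self, Write};

/// Forwards every chunk the serializer writes into the wrapped hasher.
struct HasherWrite<H: Hasher>(H);

impl<H: Hasher> Write for HasherWrite<H> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.0.write(buf); // Hasher::write, not io::Write::write
        Ok(buf.len())
    }
    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}

// Illustrative stand-in for a serializable filter AST.
#[derive(Serialize)]
struct Expr {
    lhs: String,
    op: String,
    rhs: u32,
}

fn main() {
    let expr = Expr { lhs: "num1".into(), op: "GreaterThan".into(), rhs: 3 };
    let mut hasher = DefaultHasher::new();
    // No intermediate String is allocated: bytes flow from the serializer into the hasher.
    serde_json::to_writer(HasherWrite(&mut hasher), &expr).unwrap();
    println!("streaming JSON hash: {:x}", hasher.finish());
}
```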
bigint.rs
// Copyright 2015-2017 Parity Technologies // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! benchmarking for bigint //! should be started with: //! ```bash //! rustup run nightly cargo bench //! ``` #![feature(test)] #![feature(asm)] extern crate test; extern crate bigint; use test::{Bencher, black_box}; use bigint::{U256, U512, U128}; #[bench] fn u256_add(b: &mut Bencher) { b.iter(|| { let n = black_box(10000); let zero = black_box(U256::zero()); (0..n).fold(zero, |old, new| { old.overflowing_add(U256::from(black_box(new))).0 }) }); } #[bench] fn u256_sub(b: &mut Bencher) { b.iter(|| { let n = black_box(10000); let max = black_box(U256::max_value()); (0..n).fold(max, |old, new| { old.overflowing_sub(U256::from(black_box(new))).0 }) }); } #[bench] fn u512_sub(b: &mut Bencher) { b.iter(|| { let n = black_box(10000); let max = black_box(U512::max_value()); (0..n).fold(max, |old, new| { let new = black_box(new); let p = new % 2; old.overflowing_sub(U512([p, p, p, p, p, p, p, new])).0 }) }); } #[bench] fn u512_add(b: &mut Bencher) { b.iter(|| { let n = black_box(10000); let zero = black_box(U512::zero()); (0..n).fold(zero, |old, new| { let new = black_box(new); old.overflowing_add(U512([new, new, new, new, new, new, new, new])) .0 }) }); } #[bench] fn u512_mul(b: &mut Bencher) { b.iter(|| { (1..10000).fold(black_box(U512::one()), |old, new| { old.overflowing_mul(U512::from(black_box(new | 1))).0
#[bench] fn u512_mul_small(b: &mut Bencher) { b.iter(|| { (1..153).fold(black_box(U512::one()), |old, _| { old.overflowing_mul(U512::from(black_box(10))).0 }) }); } #[bench] fn u256_mul(b: &mut Bencher) { b.iter(|| { (1..10000).fold(black_box(U256::one()), |old, new| { old.overflowing_mul(U256::from(black_box(new | 1))).0 }) }); } #[bench] fn u256_mul_small(b: &mut Bencher) { b.iter(|| { (1..77).fold(black_box(U256::one()), |old, _| { old.overflowing_mul(U256::from(black_box(10))).0 }) }); } #[bench] fn u256_full_mul(b: &mut Bencher) { b.iter(|| { let n = black_box(10000); let one = black_box(U256::one()); (1..n).map(|n| n | 1).fold(one, |old, new| { let new = black_box(new); let U512(ref u512words) = old.full_mul(U256([new, new, new, new])); U256([u512words[0], u512words[2], u512words[2], u512words[3]]) }) }); } #[bench] fn u128_mul(b: &mut Bencher) { b.iter(|| { let n = black_box(10000); (1..n).fold(U128([12345u64, 0u64]), |old, new| { old.overflowing_mul(U128::from(new | 1)).0 }) }); } #[bench] fn u256_from_le(b: &mut Bencher) { b.iter(|| { let raw = black_box( [ 1u8, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, ], ); let _ = U256::from_little_endian(&raw[..]); }); } #[bench] fn u256_from_be(b: &mut Bencher) { b.iter(|| { let raw = black_box( [ 1u8, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, ], ); let _ = U256::from_big_endian(&raw[..]); }); }
}) }); }
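These benchmarks thread every input through black_box and fold the results so the optimizer cannot precompute or discard the arithmetic; a minimal nightly-only sketch of the same idiom on plain u64 (the bench names are illustrative):

```rust
#![feature(test)]
extern crate test;

use test::{black_box, Bencher};

#[bench]
fn fold_with_black_box(b: &mut Bencher) {
    b.iter(|| {
        // black_box hides the bounds and inputs from the optimizer, so the
        // additions are actually performed instead of being constant-folded.
        let n = black_box(10_000u64);
        (0..n).fold(black_box(0u64), |acc, x| acc.wrapping_add(black_box(x)))
    });
}

#[bench]
fn fold_without_black_box(b: &mut Bencher) {
    // Without black_box the compiler may evaluate this sum at compile time,
    // which would make the measurement meaningless.
    b.iter(|| (0..10_000u64).fold(0u64, |acc, x| acc.wrapping_add(x)));
}
```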
mod.rs
//! ABI param and parsing for it. mod lenient; mod strict; mod token; use {ParamType, Error, ErrorKind, ResultExt}; pub use self::lenient::LenientTokenizer; pub use self::strict::StrictTokenizer; pub use self::token::Token; use rstd::prelude::*; use rstd::vec::Vec; #[cfg(not(feature = "std"))] use alloc::string::String; /// This trait should be used to parse string values as tokens. pub trait Tokenizer { /// Tries to parse a string as a token of given type. fn tokenize(param: &ParamType, value: &str) -> Result<Token, Error> { match *param { ParamType::Address => Self::tokenize_address(value).map(|a| Token::Address(a.into())), ParamType::String => Self::tokenize_string(value).map(Token::String), ParamType::Bool => Self::tokenize_bool(value).map(Token::Bool), ParamType::Bytes => Self::tokenize_bytes(value).map(Token::Bytes), ParamType::FixedBytes(len) => Self::tokenize_fixed_bytes(value, len).map(Token::FixedBytes), ParamType::Uint(_) => Self::tokenize_uint(value).map(Into::into).map(Token::Uint), ParamType::Int(_) => Self::tokenize_int(value).map(Into::into).map(Token::Int), ParamType::Array(ref p) => Self::tokenize_array(value, p).map(Token::Array), ParamType::FixedArray(ref p, len) => Self::tokenize_fixed_array(value, p, len).map(Token::FixedArray), }.chain_err(|| format!("Cannot parse {}", param)) } /// Tries to parse a value as a vector of tokens of fixed size. fn tokenize_fixed_array(value: &str, param: &ParamType, len: usize) -> Result<Vec<Token>, Error> { let result = Self::tokenize_array(value, param)?; match result.len() == len { true => Ok(result), false => Err(ErrorKind::InvalidData.into()), } } /// Tries to parse a value as a vector of tokens. fn tokenize_array(value: &str, param: &ParamType) -> Result<Vec<Token>, Error> { if !value.starts_with('[') || !value.ends_with(']') { return Err(ErrorKind::InvalidData.into()); } if value.chars().count() == 2 { return Ok(vec![]); } let mut result = vec![]; let mut nested = 0isize; let mut ignore = false; let mut last_item = 1; for (i, ch) in value.chars().enumerate() { match ch { '[' if ignore == false => { nested += 1; }, ']' if ignore == false => { nested -= 1; if nested < 0 { return Err(ErrorKind::InvalidData.into()); } else if nested == 0 { let sub = &value[last_item..i]; let token = Self::tokenize(param, sub)?; result.push(token); last_item = i + 1; } }, '"' => { ignore = !ignore; }, ',' if nested == 1 && ignore == false => { let sub = &value[last_item..i]; let token = Self::tokenize(param, sub)?; result.push(token); last_item = i + 1; }, _ => () } } if ignore { return Err(ErrorKind::InvalidData.into()); } Ok(result) } /// Tries to parse a value as an address. fn tokenize_address(value: &str) -> Result<[u8; 20], Error>; /// Tries to parse a value as a string. fn tokenize_string(value: &str) -> Result<String, Error>; /// Tries to parse a value as a bool. fn tokenize_bool(value: &str) -> Result<bool, Error>; /// Tries to parse a value as bytes. fn tokenize_bytes(value: &str) -> Result<Vec<u8>, Error>; /// Tries to parse a value as bytes. fn tokenize_fixed_bytes(value: &str, len: usize) -> Result<Vec<u8>, Error>; /// Tries to parse a value as unsigned integer. fn tokenize_uint(value: &str) -> Result<[u8; 32], Error>; /// Tries to parse a value as signed integer. fn tokenize_int(value: &str) -> Result<[u8; 32], Error>; } #[cfg(test)] mod test { use super::{LenientTokenizer, Tokenizer, ParamType}; #[test] fn single_quoted_in_array_must_error()
}
{ assert!(LenientTokenizer::tokenize_array("[1,\"0,false]", &ParamType::Bool).is_err()); assert!(LenientTokenizer::tokenize_array("[false\"]", &ParamType::Bool).is_err()); assert!(LenientTokenizer::tokenize_array("[1,false\"]", &ParamType::Bool).is_err()); assert!(LenientTokenizer::tokenize_array("[1,\"0\",false]", &ParamType::Bool).is_err()); assert!(LenientTokenizer::tokenize_array("[1,0]", &ParamType::Bool).is_ok()); }
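tokenize_array above splits a bracketed literal by tracking nesting depth and quote state; a standalone sketch of just that splitting step (the function name and error type are illustrative, not part of ethabi):

```rust
// Illustrative helper (not part of ethabi): split a bracketed array literal
// into its top-level elements by tracking nesting depth and quote state.
fn split_top_level(value: &str) -> Result<Vec<&str>, &'static str> {
    if !value.starts_with('[') || !value.ends_with(']') {
        return Err("not an array literal");
    }
    let mut items = Vec::new();
    let mut nested = 0isize;
    let mut in_quotes = false;
    let mut last = 1; // first byte after the opening '['
    for (i, ch) in value.char_indices() {
        match ch {
            '[' if !in_quotes => nested += 1,
            ']' if !in_quotes => {
                nested -= 1;
                if nested == 0 && i > last {
                    // closing bracket of the whole literal: flush the last element
                    items.push(&value[last..i]);
                }
            }
            '"' => in_quotes = !in_quotes,
            ',' if nested == 1 && !in_quotes => {
                // a top-level comma separates two elements
                items.push(&value[last..i]);
                last = i + 1;
            }
            _ => {}
        }
    }
    if in_quotes {
        return Err("unterminated quote");
    }
    Ok(items)
}

fn main() {
    // Prints ["[1,2]", "[3]"]; each element is then tokenized recursively.
    println!("{:?}", split_top_level("[[1,2],[3]]").unwrap());
}
```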
inverse_square_root_decay_schedule.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from . import FairseqLRScheduler, register_lr_scheduler import torch @register_lr_scheduler('inverse_sqrt_decay') class
(FairseqLRScheduler): """Decay the LR based on the inverse square root of the update number. We also support a warmup phase where we linearly increase the learning rate from some initial learning rate (``--warmup-init-lr``) until the configured learning rate (``--lr``). Thereafter we decay proportional to the number of updates, with a decay factor set to align with the configured learning rate. During warmup:: lrs = torch.linspace(args.warmup_init_lr, args.lr, args.warmup_updates) lr = lrs[update_num] After warmup:: decay_factor = args.lr * sqrt(args.warmup_updates) lr = decay_factor / sqrt(update_num) """ def __init__(self, args, optimizer): super().__init__(args, optimizer) if len(args.lr) > 1: raise ValueError( 'Cannot use a fixed learning rate schedule with inverse_sqrt.' ' Consider --lr-scheduler=fixed instead.' ) warmup_end_lr = args.lr[0] if args.warmup_init_lr < 0: args.warmup_init_lr = 0 if args.warmup_updates > 0 else warmup_end_lr # linearly warmup for the first args.warmup_updates self.lr_step = (warmup_end_lr - args.warmup_init_lr) / args.warmup_updates # then, decay prop. to the inverse square root of the update number self.decay_factor = warmup_end_lr * args.warmup_updates**0.5 # initial learning rate self.lr = args.warmup_init_lr self.optimizer.set_lr(self.lr) self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( self.optimizer.optimizer, patience=0, factor=args.lr_shrink, threshold=args.lr_threshold) @staticmethod def add_args(parser): """Add arguments to the parser for this LR scheduler.""" # fmt: off parser.add_argument('--warmup-updates', default=4000, type=int, metavar='N', help='warmup the learning rate linearly for the first N updates') parser.add_argument('--warmup-init-lr', default=-1, type=float, metavar='LR', help='initial learning rate during warmup phase; default is args.lr') parser.add_argument('--lr-shrink', default=0.1, type=float, metavar='LS', help='shrink factor for annealing, lr_new = (lr * lr_shrink)') parser.add_argument('--lr-threshold', default=1e-4, type=float, metavar='LT', help='Threshold for measuring the new optimum, \ to only focus on significant changes') # fmt: on def step(self, epoch, val_loss=None): """Update the learning rate at the end of the given epoch.""" super().step(epoch, val_loss) if val_loss is not None: self.lr_scheduler.step(val_loss, epoch) else: self.lr_scheduler.last_epoch = epoch return self.optimizer.get_lr() def step_update(self, num_updates): """Update the learning rate after each update.""" if num_updates < self.args.warmup_updates: self.lr = self.args.warmup_init_lr + num_updates*self.lr_step else: self.lr = self.decay_factor * num_updates**-0.5 self.optimizer.set_lr(self.lr) return self.lr
InverseSquareRootDecaySchedule
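The docstring above reduces the schedule to two formulas; a worked sketch of the resulting values, assuming hypothetical settings of --lr 0.0005, --warmup-init-lr 0 and --warmup-updates 4000:

```rust
// Hypothetical hyperparameters, not taken from any particular run:
// lr = 5e-4, warmup_init_lr = 0, warmup_updates = 4000.
fn lr_at(update: u64) -> f64 {
    let (lr, warmup_init_lr, warmup_updates) = (5e-4_f64, 0.0_f64, 4000.0_f64);
    let u = update as f64;
    if u < warmup_updates {
        // linear warmup from warmup_init_lr up to lr
        warmup_init_lr + u * (lr - warmup_init_lr) / warmup_updates
    } else {
        // decay_factor = lr * sqrt(warmup_updates), so the two pieces meet at
        // the end of warmup and the rate then falls as 1/sqrt(update).
        lr * warmup_updates.sqrt() / u.sqrt()
    }
}

fn main() {
    for u in [1_000u64, 4_000, 16_000, 64_000] {
        println!("update {:>6}: lr = {:.6}", u, lr_at(u));
    }
    // 1000 -> 0.000125, 4000 -> 0.000500, 16000 -> 0.000250, 64000 -> 0.000125
}
```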
handlenetworkinstance.go
// Copyright (c) 2018,2019 Zededa, Inc. // SPDX-License-Identifier: Apache-2.0 // Handle NetworkInstanceStatus from zedrouter package zedagent import ( "bytes" "net" "strings" "time" "github.com/golang/protobuf/ptypes/timestamp" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" zcommon "github.com/lf-edge/eve/api/go/evecommon" "github.com/lf-edge/eve/api/go/flowlog" zinfo "github.com/lf-edge/eve/api/go/info" // XXX need to stop using zmet "github.com/lf-edge/eve/api/go/metrics" // zinfo and zmet here "github.com/lf-edge/eve/pkg/pillar/types" "github.com/lf-edge/eve/pkg/pillar/zedcloud" log "github.com/sirupsen/logrus" ) var flowIteration int func handleNetworkInstanceModify(ctxArg interface{}, key string, statusArg interface{}) { log.Infof("handleNetworkInstanceStatusModify(%s)", key) ctx := ctxArg.(*zedagentContext) status := statusArg.(types.NetworkInstanceStatus) if !status.ErrorTime.IsZero() { log.Errorf("Received NetworkInstance error %s", status.Error) } prepareAndPublishNetworkInstanceInfoMsg(ctx, status, false) log.Infof("handleNetworkInstanceModify(%s) done", key) } func handleNetworkInstanceDelete(ctxArg interface{}, key string, statusArg interface{}) { log.Infof("handleNetworkInstanceDelete(%s)", key) status := statusArg.(types.NetworkInstanceStatus) ctx := ctxArg.(*zedagentContext) prepareAndPublishNetworkInstanceInfoMsg(ctx, status, true) log.Infof("handleNetworkInstanceDelete(%s) done", key) }
status types.NetworkInstanceStatus, deleted bool) { infoMsg := &zinfo.ZInfoMsg{} infoType := new(zinfo.ZInfoTypes) *infoType = zinfo.ZInfoTypes_ZiNetworkInstance infoMsg.DevId = *proto.String(zcdevUUID.String()) infoMsg.Ztype = *infoType infoMsg.AtTimeStamp = ptypes.TimestampNow() uuid := status.Key() info := new(zinfo.ZInfoNetworkInstance) info.NetworkID = uuid info.NetworkVersion = status.UUIDandVersion.Version info.Displayname = status.DisplayName info.InstType = uint32(status.Type) info.CurrentUplinkIntf = status.CurrentUplinkIntf if !status.ErrorTime.IsZero() { errInfo := new(zinfo.ErrorInfo) errInfo.Description = status.Error errTime, _ := ptypes.TimestampProto(status.ErrorTime) errInfo.Timestamp = errTime info.NetworkErr = append(info.NetworkErr, errInfo) } if deleted { // XXX When a network instance is deleted it is ideal to // send a flag such as deleted/gone inside // ZInfoNetworkInstance message. Having a separate flag // (indicating deletion) would make is explicit // and easy for the cloud process. info.Activated = false } else { info.Activated = status.Activated info.BridgeNum = uint32(status.BridgeNum) info.BridgeName = status.BridgeName info.BridgeIPAddr = status.BridgeIPAddr for mac, ip := range status.IPAssignments { assignment := new(zinfo.ZmetIPAssignmentEntry) assignment.MacAddress = mac assignment.IpAddress = append(assignment.IpAddress, ip.String()) info.IpAssignments = append(info.IpAssignments, assignment) } for _, s := range status.BridgeIPSets { info.BridgeIPSets = append(info.BridgeIPSets, s) } for _, v := range status.Vifs { vi := new(zinfo.ZmetVifInfo) vi.VifName = v.Name vi.MacAddress = v.MacAddr vi.AppID = v.AppID.String() info.Vifs = append(info.Vifs, vi) } info.Ipv4Eid = status.Ipv4Eid for _, ifname := range status.IfNameList { ia := ctx.assignableAdapters.LookupIoBundleIfName(ifname) if ia == nil { log.Warnf("Missing adapter for ifname %s", ifname) continue } reportAA := new(zinfo.ZioBundle) reportAA.Type = zcommon.PhyIoType(ia.Type) reportAA.Name = ia.Phylabel reportAA.UsedByAppUUID = zcdevUUID.String() list := ctx.assignableAdapters.LookupIoBundleAny(ia.Phylabel) for _, ib := range list { if ib == nil { continue } reportAA.Members = append(reportAA.Members, ib.Phylabel) if ib.MacAddr != "" { reportMac := new(zinfo.IoAddresses) reportMac.MacAddress = ib.MacAddr reportAA.IoAddressList = append(reportAA.IoAddressList, reportMac) } log.Debugf("AssignableAdapters for %s macs %v", reportAA.Name, reportAA.IoAddressList) } info.AssignedAdapters = append(info.AssignedAdapters, reportAA) } // For now we just send an empty lispInfo to indicate deletion to cloud. // It can't be omitted since protobuf requires something to satisfy // the oneof. 
if status.LispInfoStatus != nil { fillLispInfo(info, status.LispInfoStatus) } // fill Vpn info if status.VpnStatus != nil { fillVpnInfo(info, status.VpnStatus) } } infoMsg.InfoContent = new(zinfo.ZInfoMsg_Niinfo) if x, ok := infoMsg.GetInfoContent().(*zinfo.ZInfoMsg_Niinfo); ok { x.Niinfo = info } log.Debugf("Publish NetworkInstance Info message to zedcloud: %v", infoMsg) publishInfo(ctx, uuid, infoMsg) } func fillLispInfo(info *zinfo.ZInfoNetworkInstance, lispStatus *types.LispInfoStatus) { lispInfo := new(zinfo.ZInfoLisp) lispInfo.ItrCryptoPort = lispStatus.ItrCryptoPort lispInfo.EtrNatPort = lispStatus.EtrNatPort for _, intf := range lispStatus.Interfaces { lispInfo.Interfaces = append(lispInfo.Interfaces, intf) } // Copy ITR database map entries for _, dbMap := range lispStatus.DatabaseMaps { dbMapEntry := &zinfo.DatabaseMap{ IID: dbMap.IID, } for _, mapEntry := range dbMap.MapCacheEntries { mapCacheEntry := &zinfo.MapCacheEntry{ EID: mapEntry.EID.String(), } for _, rloc := range mapEntry.Rlocs { rlocEntry := &zinfo.RlocState{ Rloc: rloc.Rloc.String(), Reachable: rloc.Reachable, } mapCacheEntry.Rlocs = append(mapCacheEntry.Rlocs, rlocEntry) } dbMapEntry.MapCacheEntries = append(dbMapEntry.MapCacheEntries, mapCacheEntry) } lispInfo.DatabaseMaps = append(lispInfo.DatabaseMaps, dbMapEntry) } // Copy ETR decap entries for _, decapKey := range lispStatus.DecapKeys { decap := &zinfo.DecapKey{ Rloc: decapKey.Rloc.String(), Port: decapKey.Port, KeyCount: decapKey.KeyCount, } lispInfo.DecapKeys = append(lispInfo.DecapKeys, decap) } info.InfoContent = new(zinfo.ZInfoNetworkInstance_Linfo) if x, ok := info.GetInfoContent().(*zinfo.ZInfoNetworkInstance_Linfo); ok { x.Linfo = lispInfo } } func fillVpnInfo(info *zinfo.ZInfoNetworkInstance, vpnStatus *types.VpnStatus) { info.SoftwareList = new(zinfo.ZInfoSW) info.SoftwareList.SwVersion = vpnStatus.Version upTime, _ := ptypes.TimestampProto(vpnStatus.UpTime) info.UpTimeStamp = upTime vpnInfo := new(zinfo.ZInfoVpn) vpnInfo.PolicyBased = vpnStatus.PolicyBased listeningIpAddrs := strings.Split(vpnStatus.IpAddrs, " ") vpnInfo.ListeningIpAddrs = make([]string, len(listeningIpAddrs)) for idx, ipAddr := range listeningIpAddrs { vpnInfo.ListeningIpAddrs[idx] = ipAddr } totalConnCount := len(vpnStatus.StaleVpnConns) + len(vpnStatus.ActiveVpnConns) if totalConnCount == 0 { info.InfoContent = new(zinfo.ZInfoNetworkInstance_Vinfo) if x, ok := info.GetInfoContent().(*zinfo.ZInfoNetworkInstance_Vinfo); ok { x.Vinfo = vpnInfo } return } vpnInfo.Conn = make([]*zinfo.ZInfoVpnConn, totalConnCount) // stale connections connIdx := 0 for _, vpnConn := range vpnStatus.StaleVpnConns { vpnConnInfo := publishVpnConnection(vpnInfo, vpnConn) if vpnConnInfo != nil { vpnInfo.Conn[connIdx] = vpnConnInfo connIdx++ } } // active connections for _, vpnConn := range vpnStatus.ActiveVpnConns { vpnConnInfo := publishVpnConnection(vpnInfo, vpnConn) if vpnConnInfo != nil { vpnInfo.Conn[connIdx] = vpnConnInfo connIdx++ } } info.InfoContent = new(zinfo.ZInfoNetworkInstance_Vinfo) if x, ok := info.GetInfoContent().(*zinfo.ZInfoNetworkInstance_Vinfo); ok { x.Vinfo = vpnInfo } } func handleNetworkInstanceMetricsModify(ctxArg interface{}, key string, statusArg interface{}) { log.Debugf("handleNetworkInstanceMetricsModify(%s)", key) } func handleNetworkInstanceMetricsDelete(ctxArg interface{}, key string, statusArg interface{}) { log.Infof("handleNetworkInstanceMetricsDelete(%s)", key) } func createNetworkInstanceMetrics(ctx *zedagentContext, reportMetrics *zmet.ZMetricMsg) { sub := 
ctx.subNetworkInstanceMetrics metlist := sub.GetAll() if metlist == nil || len(metlist) == 0 { return } for _, met := range metlist { metrics := met.(types.NetworkInstanceMetrics) metricInstance := protoEncodeNetworkInstanceMetricProto(metrics) reportMetrics.Nm = append(reportMetrics.Nm, metricInstance) } log.Debugln("network instance metrics: ", reportMetrics.Nm) } func protoEncodeNetworkInstanceMetricProto(status types.NetworkInstanceMetrics) *zmet.ZMetricNetworkInstance { metric := new(zmet.ZMetricNetworkInstance) metric.NetworkID = status.Key() metric.NetworkVersion = status.UUIDandVersion.Version metric.Displayname = status.DisplayName metric.InstType = uint32(status.Type) switch status.Type { case types.NetworkInstanceTypeCloud: protoEncodeVpnInstanceMetric(status, metric) case types.NetworkInstanceTypeMesh: // XXX any subtype? log.Debugf("Publish Lisp Instance Metric to Zedcloud %v", metric) protoEncodeLispInstanceMetric(status, metric) default: protoEncodeGenericInstanceMetric(status, metric) } return metric } func protoEncodeGenericInstanceMetric(status types.NetworkInstanceMetrics, metric *zmet.ZMetricNetworkInstance) { networkStats := new(zmet.ZMetricNetworkStats) rxStats := new(zmet.NetworkStats) txStats := new(zmet.NetworkStats) netMetric := status.NetworkMetrics.MetricList[0] rxStats.TotalPackets = netMetric.RxPkts rxStats.TotalBytes = netMetric.RxBytes rxStats.Errors = netMetric.RxErrors // Add all types of Rx drops var drops uint64 = 0 drops += netMetric.RxDrops drops += netMetric.RxAclDrops drops += netMetric.RxAclRateLimitDrops rxStats.Drops = drops txStats.TotalPackets = netMetric.TxPkts txStats.TotalBytes = netMetric.TxBytes txStats.Errors = netMetric.TxErrors // Add all types of Tx drops drops = 0 drops += netMetric.TxDrops drops += netMetric.TxAclDrops drops += netMetric.TxAclRateLimitDrops txStats.Drops = drops networkStats.Rx = rxStats networkStats.Tx = txStats metric.NetworkStats = networkStats } func protoEncodeLispInstanceMetric(status types.NetworkInstanceMetrics, metric *zmet.ZMetricNetworkInstance) { if status.LispMetrics == nil { return } protoEncodeGenericInstanceMetric(status, metric) metrics := status.LispMetrics lispGlobalMetric := new(zmet.ZMetricLispGlobal) lispGlobalMetric.ItrPacketSendError = &zmet.PktStat{ Packets: metrics.ItrPacketSendError.Pkts, Bytes: metrics.ItrPacketSendError.Bytes, } lispGlobalMetric.InvalidEidError = &zmet.PktStat{ Packets: metrics.InvalidEidError.Pkts, Bytes: metrics.InvalidEidError.Bytes, } lispGlobalMetric.NoDecryptKey = &zmet.PktStat{ Packets: metrics.NoDecryptKey.Pkts, Bytes: metrics.NoDecryptKey.Bytes, } lispGlobalMetric.OuterHeaderError = &zmet.PktStat{ Packets: metrics.OuterHeaderError.Pkts, Bytes: metrics.OuterHeaderError.Bytes, } lispGlobalMetric.BadInnerVersion = &zmet.PktStat{ Packets: metrics.BadInnerVersion.Pkts, Bytes: metrics.BadInnerVersion.Bytes, } lispGlobalMetric.GoodPackets = &zmet.PktStat{ Packets: metrics.GoodPackets.Pkts, Bytes: metrics.GoodPackets.Bytes, } lispGlobalMetric.ICVError = &zmet.PktStat{ Packets: metrics.ICVError.Pkts, Bytes: metrics.ICVError.Bytes, } lispGlobalMetric.LispHeaderError = &zmet.PktStat{ Packets: metrics.LispHeaderError.Pkts, Bytes: metrics.LispHeaderError.Bytes, } lispGlobalMetric.CheckSumError = &zmet.PktStat{ Packets: metrics.CheckSumError.Pkts, Bytes: metrics.CheckSumError.Bytes, } lispGlobalMetric.DecapReInjectError = &zmet.PktStat{ Packets: metrics.DecapReInjectError.Pkts, Bytes: metrics.DecapReInjectError.Bytes, } lispGlobalMetric.DecryptError = &zmet.PktStat{ Packets: 
metrics.DecryptError.Pkts, Bytes: metrics.DecryptError.Bytes, } metric.LispGlobalStats = lispGlobalMetric flowStats := []*zmet.ZMetricFlow{} for _, eidStat := range metrics.EidStats { iid := eidStat.IID metricFlow := &zmet.ZMetricFlow{ Iid: iid, } lEndPoint := &zmet.ZMetricFlowEndPoint{} flowLinks := []*zmet.ZMetricFlowLink{} for _, eidMap := range metrics.EidMaps { for _, eid := range eidMap.Eids { flowLink := &zmet.ZMetricFlowLink{} flowLink.Link = new(zmet.ZMetricFlowLink_Eid) if x, ok := flowLink.GetLink().(*zmet.ZMetricFlowLink_Eid); ok { x.Eid = eid.String() } flowLinks = append(flowLinks, flowLink) } } lEndPoint.Link = flowLinks metricFlow.LEndPoint = lEndPoint rEndPoint := []*zmet.ZMetricFlowEndPoint{} eid := eidStat.Eid for _, rlocStat := range eidStat.RlocStats { rloc := rlocStat.Rloc stat := rlocStat.Stats flowEndPoint := &zmet.ZMetricFlowEndPoint{} flowEndPoint.Stats = &zmet.PktStat{ Packets: stat.Pkts, Bytes: stat.Bytes, } flowEndPoint.Endpoint = new(zmet.ZMetricFlowEndPoint_Rloc) if x, ok := flowEndPoint.GetEndpoint().(*zmet.ZMetricFlowEndPoint_Rloc); ok { x.Rloc = rloc.String() } flowLinks := []*zmet.ZMetricFlowLink{} flowLink := &zmet.ZMetricFlowLink{} flowLink.Link = new(zmet.ZMetricFlowLink_Eid) if x, ok := flowLink.GetLink().(*zmet.ZMetricFlowLink_Eid); ok { x.Eid = eid.String() } flowLinks = append(flowLinks, flowLink) flowEndPoint.Link = flowLinks rEndPoint = append(rEndPoint, flowEndPoint) } metricFlow.REndPoint = rEndPoint flowStats = append(flowStats, metricFlow) } metric.FlowStats = flowStats // Fill lisp metric stats also for now. // We can deprecate the same later lispMetric := new(zmet.ZMetricLisp) lispMetric.ItrPacketSendError = &zmet.PktStat{ Packets: metrics.ItrPacketSendError.Pkts, Bytes: metrics.ItrPacketSendError.Bytes, } lispMetric.InvalidEidError = &zmet.PktStat{ Packets: metrics.InvalidEidError.Pkts, Bytes: metrics.InvalidEidError.Bytes, } lispMetric.NoDecryptKey = &zmet.PktStat{ Packets: metrics.NoDecryptKey.Pkts, Bytes: metrics.NoDecryptKey.Bytes, } lispMetric.OuterHeaderError = &zmet.PktStat{ Packets: metrics.OuterHeaderError.Pkts, Bytes: metrics.OuterHeaderError.Bytes, } lispMetric.BadInnerVersion = &zmet.PktStat{ Packets: metrics.BadInnerVersion.Pkts, Bytes: metrics.BadInnerVersion.Bytes, } lispMetric.GoodPackets = &zmet.PktStat{ Packets: metrics.GoodPackets.Pkts, Bytes: metrics.GoodPackets.Bytes, } lispMetric.ICVError = &zmet.PktStat{ Packets: metrics.ICVError.Pkts, Bytes: metrics.ICVError.Bytes, } lispMetric.LispHeaderError = &zmet.PktStat{ Packets: metrics.LispHeaderError.Pkts, Bytes: metrics.LispHeaderError.Bytes, } lispMetric.CheckSumError = &zmet.PktStat{ Packets: metrics.CheckSumError.Pkts, Bytes: metrics.CheckSumError.Bytes, } lispMetric.DecapReInjectError = &zmet.PktStat{ Packets: metrics.DecapReInjectError.Pkts, Bytes: metrics.DecapReInjectError.Bytes, } lispMetric.DecryptError = &zmet.PktStat{ Packets: metrics.DecryptError.Pkts, Bytes: metrics.DecryptError.Bytes, } lispStats := []*zmet.EidStats{} for _, eidStat := range metrics.EidStats { lispStat := &zmet.EidStats{ IID: eidStat.IID, EID: eidStat.Eid.String(), } rlocStats := []*zmet.RlocStats{} for _, rloc := range eidStat.RlocStats { rlocStat := &zmet.RlocStats{ Rloc: rloc.Rloc.String(), Stats: &zmet.PktStat{ Packets: rloc.Stats.Pkts, Bytes: rloc.Stats.Bytes, }, SecondsSinceLastPacket: rloc.SecondsSinceLastPacket, } rlocStats = append(rlocStats, rlocStat) } lispStat.RlocStatsEntries = rlocStats lispStats = append(lispStats, lispStat) } lispMetric.EidStatsEntries = lispStats 
metric.InstanceContent = new(zmet.ZMetricNetworkInstance_Lispm) if x, ok := metric.GetInstanceContent().(*zmet.ZMetricNetworkInstance_Lispm); ok { x.Lispm = lispMetric } } func protoEncodeVpnInstanceMetric(metrics types.NetworkInstanceMetrics, instanceMetrics *zmet.ZMetricNetworkInstance) { if metrics.VpnMetrics == nil { return } protoEncodeGenericInstanceMetric(metrics, instanceMetrics) stats := metrics.VpnMetrics vpnMetric := new(zmet.ZMetricVpn) vpnMetric.ConnStat = protoEncodeVpnInstanceStat(stats.DataStat) vpnMetric.NatTStat = protoEncodeVpnInstanceStat(stats.NatTStat) vpnMetric.IkeStat = protoEncodeVpnInstanceStat(stats.IkeStat) vpnMetric.EspStat = protoEncodeVpnInstanceStat(stats.EspStat) instanceMetrics.InstanceContent = new(zmet.ZMetricNetworkInstance_Vpnm) if x, ok := instanceMetrics.GetInstanceContent().(*zmet.ZMetricNetworkInstance_Vpnm); ok { x.Vpnm = vpnMetric } protoEncodeVpnInstanceFlowMetric(metrics, instanceMetrics) } func protoEncodeVpnInstanceStat(stats types.LinkPktStats) *zmet.ZMetricConn { connStat := new(zmet.ZMetricConn) connStat.InPkts = new(zmet.PktStat) connStat.OutPkts = new(zmet.PktStat) connStat.InPkts.Packets = stats.InPkts.Pkts connStat.InPkts.Bytes = stats.InPkts.Bytes connStat.OutPkts.Packets = stats.OutPkts.Pkts connStat.OutPkts.Bytes = stats.OutPkts.Bytes return connStat } func protoEncodeVpnInstanceFlowMetric(metrics types.NetworkInstanceMetrics, instanceMetrics *zmet.ZMetricNetworkInstance) { if len(metrics.VpnMetrics.VpnConns) == 0 { return } vpnMetrics := metrics.VpnMetrics instanceMetrics.FlowStats = make([]*zmet.ZMetricFlow, len(vpnMetrics.VpnConns)) for idx, connStats := range vpnMetrics.VpnConns { flowStats := new(zmet.ZMetricFlow) flowStats.Id = connStats.Id flowStats.Name = connStats.Name flowStats.Type = uint32(connStats.Type) flowStats.EstTime = connStats.EstTime lEndPoint := protoEncodeVpnMetricEndPtIpAddr(connStats.LEndPoint) lEndPoint.Stats = protoEncodeVpnMetricStats(connStats.LEndPoint.PktStats) lLink := protoEncodeVpnMetricLink(connStats.LEndPoint.LinkInfo) lEndPoint.Link = make([]*zmet.ZMetricFlowLink, 1) lEndPoint.Link[0] = lLink rEndPoint := protoEncodeVpnMetricEndPtIpAddr(connStats.REndPoint) rEndPoint.Stats = protoEncodeVpnMetricStats(connStats.REndPoint.PktStats) rLink := protoEncodeVpnMetricLink(connStats.REndPoint.LinkInfo) rEndPoint.Link = make([]*zmet.ZMetricFlowLink, 1) rEndPoint.Link[0] = rLink flowStats.LEndPoint = lEndPoint flowStats.REndPoint = make([]*zmet.ZMetricFlowEndPoint, 1) flowStats.REndPoint[0] = rEndPoint instanceMetrics.FlowStats[idx] = flowStats } } func protoEncodeVpnMetricEndPtIpAddr(endPInfo types.VpnEndPointMetrics) *zmet.ZMetricFlowEndPoint { endPoint := new(zmet.ZMetricFlowEndPoint) endPoint.Endpoint = new(zmet.ZMetricFlowEndPoint_IpAddr) if x, ok := endPoint.GetEndpoint().(*zmet.ZMetricFlowEndPoint_IpAddr); ok { x.IpAddr = endPInfo.IpAddr } return endPoint } func protoEncodeVpnMetricLink(linkInfo types.VpnLinkMetrics) *zmet.ZMetricFlowLink { link := new(zmet.ZMetricFlowLink) link.SpiId = linkInfo.SpiId link.Link = new(zmet.ZMetricFlowLink_SubNet) if x, ok := link.GetLink().(*zmet.ZMetricFlowLink_SubNet); ok { x.SubNet = linkInfo.SubNet } return link } func protoEncodeVpnMetricStats(linkStats types.PktStats) *zmet.PktStat { pktStats := new(zmet.PktStat) pktStats.Bytes = linkStats.Bytes pktStats.Packets = linkStats.Pkts return pktStats } func publishVpnConnection(vpnInfo *zinfo.ZInfoVpn, vpnConn *types.VpnConnStatus) *zinfo.ZInfoVpnConn { if vpnConn == nil { return nil } vpnConnInfo := 
new(zinfo.ZInfoVpnConn) vpnConnInfo.Id = vpnConn.Id vpnConnInfo.Name = vpnConn.Name vpnConnInfo.State = zinfo.ZInfoVpnState(vpnConn.State) vpnConnInfo.Ikes = vpnConn.Ikes vpnConnInfo.EstTime = vpnConn.EstTime vpnConnInfo.Version = vpnConn.Version lEndPointInfo := new(zinfo.ZInfoVpnEndPoint) lEndPointInfo.Id = vpnConn.LInfo.Id lEndPointInfo.IpAddr = vpnConn.LInfo.IpAddr lEndPointInfo.Port = vpnConn.LInfo.Port vpnConnInfo.LInfo = lEndPointInfo rEndPointInfo := new(zinfo.ZInfoVpnEndPoint) rEndPointInfo.Id = vpnConn.RInfo.Id rEndPointInfo.IpAddr = vpnConn.RInfo.IpAddr rEndPointInfo.Port = vpnConn.RInfo.Port vpnConnInfo.RInfo = rEndPointInfo if len(vpnConn.Links) == 0 { return vpnConnInfo } vpnConnInfo.Links = make([]*zinfo.ZInfoVpnLink, len(vpnConn.Links)) for idx, linkData := range vpnConn.Links { linkInfo := new(zinfo.ZInfoVpnLink) linkInfo.Id = linkData.Id linkInfo.ReqId = linkData.ReqId linkInfo.InstTime = linkData.InstTime linkInfo.EspInfo = linkData.EspInfo linkInfo.State = zinfo.ZInfoVpnState(linkData.State) linfo := new(zinfo.ZInfoVpnLinkInfo) linfo.SubNet = linkData.LInfo.SubNet linfo.SpiId = linkData.LInfo.SpiId linfo.Direction = linkData.LInfo.Direction linkInfo.LInfo = linfo rinfo := new(zinfo.ZInfoVpnLinkInfo) rinfo.SubNet = linkData.RInfo.SubNet rinfo.SpiId = linkData.RInfo.SpiId rinfo.Direction = linkData.RInfo.Direction linkInfo.RInfo = rinfo vpnConnInfo.Links[idx] = linkInfo } return vpnConnInfo } func publishInfo(ctx *zedagentContext, UUID string, infoMsg *zinfo.ZInfoMsg) { publishInfoToZedCloud(UUID, infoMsg, ctx.iteration) ctx.iteration += 1 } func publishInfoToZedCloud(UUID string, infoMsg *zinfo.ZInfoMsg, iteration int) { log.Infof("publishInfoToZedCloud sending %v", infoMsg) data, err := proto.Marshal(infoMsg) if err != nil { log.Fatal("publishInfoToZedCloud proto marshaling error: ", err) } statusUrl := zedcloud.URLPathString(serverNameAndPort, zedcloudCtx.V2API, false, devUUID, "info") zedcloud.RemoveDeferred(UUID) buf := bytes.NewBuffer(data) if buf == nil { log.Fatal("malloc error") } size := int64(proto.Size(infoMsg)) err = SendProtobuf(statusUrl, buf, size, iteration) if err != nil { log.Errorf("publishInfoToZedCloud failed: %s", err) // Try sending later // The buf might have been consumed buf := bytes.NewBuffer(data) if buf == nil { log.Fatal("malloc error") } zedcloud.SetDeferred(UUID, buf, size, statusUrl, zedcloudCtx, true) } else { writeSentDeviceInfoProtoMessage(data) } } func handleAppFlowMonitorModify(ctxArg interface{}, key string, statusArg interface{}) { log.Infof("handleAppFlowMonitorModify(%s)", key) flows := statusArg.(types.IPFlow) // encoding the flows with protobuf format pflows := protoEncodeAppFlowMonitorProto(flows) // send protobuf to zedcloud sendFlowProtobuf(pflows) } func handleAppFlowMonitorDelete(ctxArg interface{}, key string, statusArg interface{}) { log.Infof("handleAppFlowMonitorDelete(%s)", key) } func handleAppVifIPTrigModify(ctxArg interface{}, key string, statusArg interface{}) { log.Infof("handleAppVifIPTrigModify(%s)", key) ctx := ctxArg.(*zedagentContext) trig := statusArg.(types.VifIPTrig) findVifAndTrigAppInfoUpload(ctx, trig.MacAddr, trig.IPAddr) } func findVifAndTrigAppInfoUpload(ctx *zedagentContext, macAddr string, ipAddr net.IP) { sub := ctx.getconfigCtx.subAppInstanceStatus items := sub.GetAll() for _, st := range items { aiStatus := st.(types.AppInstanceStatus) log.Debugf("findVifAndTrigAppInfoUpload: mac address %s match, ip %v, publish the info to cloud", macAddr, ipAddr) uuidStr := aiStatus.Key() aiStatusPtr := 
&aiStatus if aiStatusPtr.MaybeUpdateAppIPAddr(macAddr, ipAddr.String()) { log.Debugf("findVifAndTrigAppInfoUpload: underlay %v", aiStatusPtr.UnderlayNetworks) PublishAppInfoToZedCloud(ctx, uuidStr, aiStatusPtr, ctx.assignableAdapters, ctx.iteration) ctx.iteration++ break } } } func aclActionToProtoAction(action types.ACLActionType) flowlog.ACLAction { switch action { case types.ACLActionAccept: return flowlog.ACLAction_ActionAccept case types.ACLActionDrop: return flowlog.ACLAction_ActionDrop default: return flowlog.ACLAction_ActionUnknown } } func protoEncodeAppFlowMonitorProto(ipflow types.IPFlow) *flowlog.FlowMessage { pflows := new(flowlog.FlowMessage) pflows.DevId = ipflow.DevID.String() // ScopeInfo fill in pScope := new(flowlog.ScopeInfo) pScope.Uuid = ipflow.Scope.UUID.String() pScope.Intf = ipflow.Scope.Intf pScope.LocalIntf = ipflow.Scope.Localintf pScope.NetInstUUID = ipflow.Scope.NetUUID.String() pflows.Scope = pScope // get the ip flows from the input for _, rec := range ipflow.Flows { prec := new(flowlog.FlowRecord) // IpFlow fill in pIpflow := new(flowlog.IpFlow) pIpflow.Src = rec.Flow.Src.String() pIpflow.Dest = rec.Flow.Dst.String() pIpflow.SrcPort = int32(rec.Flow.SrcPort) pIpflow.DestPort = int32(rec.Flow.DstPort) pIpflow.Protocol = int32(rec.Flow.Proto) prec.Flow = pIpflow prec.Inbound = rec.Inbound prec.AclId = rec.ACLID prec.Action = aclActionToProtoAction(rec.Action) // prec.AclName = pStart := new(timestamp.Timestamp) pStart = timeNanoToProto(rec.StartTime) prec.StartTime = pStart pEnd := new(timestamp.Timestamp) pEnd = timeNanoToProto(rec.StopTime) prec.EndTime = pEnd prec.TxBytes = rec.TxBytes prec.TxPkts = rec.TxPkts prec.RxBytes = rec.RxBytes prec.RxPkts = rec.RxPkts pflows.Flows = append(pflows.Flows, prec) } // get the ip DNS records from the input for _, dns := range ipflow.DNSReqs { pdns := new(flowlog.DnsRequest) pdns.HostName = dns.HostName for _, address := range dns.Addrs { pdns.Addrs = append(pdns.Addrs, address.String()) } dnsTime := new(timestamp.Timestamp) dnsTime = timeNanoToProto(dns.RequestTime) pdns.RequestTime = dnsTime pdns.AclNum = dns.ACLNum pflows.DnsReqs = append(pflows.DnsReqs, pdns) } return pflows } func sendFlowProtobuf(protoflows *flowlog.FlowMessage) { flowQ.PushBack(protoflows) for flowQ.Len() > 0 { ent := flowQ.Front() pflowsPtr := ent.Value.(*flowlog.FlowMessage) data, err := proto.Marshal(pflowsPtr) if err != nil { log.Errorf("FlowStats: SendFlowProtobuf proto marshaling error %v", err) // XXX change to fatal } flowIteration++ buf := bytes.NewBuffer(data) size := int64(proto.Size(pflowsPtr)) flowlogURL := zedcloud.URLPathString(serverNameAndPort, zedcloudCtx.V2API, false, devUUID, "flowlog") const return400 = false _, _, rtf, err := zedcloud.SendOnAllIntf(&zedcloudCtx, flowlogURL, size, buf, flowIteration, return400) if err != nil { if rtf == types.SenderStatusRemTempFail { log.Errorf("FlowStats: sendFlowProtobuf remoteTemporaryFailure: %s", err) } else { log.Errorf("FlowStats: sendFlowProtobuf failed: %s", err) } flowIteration-- if flowQ.Len() > 100 { // if fail to send for too long, start to drop flowQ.Remove(ent) } return } log.Debugf("Send Flow protobuf out on all intfs, message size %d, flowQ size %d", size, flowQ.Len()) writeSentFlowProtoMessage(data) flowQ.Remove(ent) } } func timeNanoToProto(timenum int64) *timestamp.Timestamp { timeProto, _ := ptypes.TimestampProto(time.Unix(0, timenum)) return timeProto } func writeSentFlowProtoMessage(contents []byte) { writeProtoMessage("lastflowlog", contents) }
func prepareAndPublishNetworkInstanceInfoMsg(ctx *zedagentContext,
replication.rs
use crate::conf::GroupInfo; use crate::qpaxos::Command; use crate::qpaxos::MakeRequest; use crate::replica::InstanceStatus; use crate::replica::Replica; use crate::replica::ReplicationStatus; use crate::replication::bcast_msg; use crate::replication::handle_accept_reply; use crate::replication::handle_prepare_reply; use crate::ReplicationError; use crate::StorageAPI; /// replicate runs the replication algorithm to forward an instance to the other replicas in the leader's group. /// An Ok return value indicates the instance has become safe, but is not yet committed. /// An Err return value means the instance may still be unsafe. /// /// On success it returns the status containing the instance and the replication status. pub async fn
( cmds: &[Command], g: &GroupInfo, r: &Replica, ) -> Result<ReplicationStatus, ReplicationError> { let grids: Vec<_> = g.replicas.keys().cloned().collect(); println!("grids:{:?}", grids); let inst = r.new_instance(cmds)?; let n = grids.len(); let mut st = ReplicationStatus::new(n as i32, inst); println!("st:{:?}", st); // a special path for n = 1 let fast = st.get_fastpath_deps(&grids); match fast { Some(fdeps) => { st.instance.deps = Some(fdeps.into()); // instance is safe to commit. return Ok(st); } None => { // not enough fast replies, continue } }; // TODO not impl yet. let mut deps_committed = vec![]; for _ in 0..g.replicas.len() { deps_committed.push(false); } let req = MakeRequest::prepare(0, &st.instance, &deps_committed); let repls = bcast_msg(&r.peers, req).await; println!("got {} replies", repls.len()); for (from_rid, repl) in repls.iter() { println!("fast-reply from:{} {}", from_rid, repl.get_ref()); // TODO consume repl do not clone handle_prepare_reply(&mut st, *from_rid, repl.get_ref().clone())?; let fast = st.get_fastpath_deps(&grids); match fast { Some(fdeps) => { st.instance.deps = Some(fdeps.into()); // instance is safe to commit. return Ok(st); } None => { // not enough fast replies, continue } }; } let adeps = st.get_slowpath_deps(&grids); // println!("st.fast_deps: {:?}", st.fast_deps); println!("got accept deps:{:?}", adeps); let adeps = adeps.ok_or(ReplicationError::NotEnoughQuorum( InstanceStatus::Prepared, st.quorum, st.prepared[&0].replied.len() as i32, ))?; // slow path st.instance.deps = Some(adeps.into()); st.start_accept(); r.storage .set_instance(&st.instance.instance_id.unwrap(), &st.instance)?; let req = MakeRequest::accept(0, &st.instance); let repls = bcast_msg(&r.peers, req).await; for (from_rid, repl) in repls.iter() { handle_accept_reply(&mut st, *from_rid, repl.get_ref().clone())?; if st.accepted.len() as i32 >= st.quorum { // instance is safe to commit. return Ok(st); } } // println!("{:?}", st.fast_deps); Err(ReplicationError::NotEnoughQuorum( InstanceStatus::Accepted, st.quorum, st.accepted.len() as i32, )) }
replicate
owlbot.py
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This script is used to synthesize generated parts of this library.""" import logging from pathlib import Path import subprocess import synthtool as s from synthtool.languages import php from synthtool import _tracked_paths logging.basicConfig(level=logging.DEBUG) src = Path(f"../{php.STAGING_DIR}/Redis").resolve() dest = Path().resolve() # Added so that we can pass copy_excludes in the owlbot_main() call _tracked_paths.add(src) php.owlbot_main( src=src, dest=dest, copy_excludes=[ src / "*/src/V1/CloudRedisClient.php", src / "*/src/V1beta1/CloudRedisClient.php" ] ) # document and utilize apiEndpoint instead of serviceAddress s.replace( "**/Gapic/*GapicClient.php", r"'serviceAddress' =>", r"'apiEndpoint' =>") s.replace( "**/Gapic/*GapicClient.php", r"@type string \$serviceAddress\n\s+\*\s+The address", r"""@type string $serviceAddress * **Deprecated**. This option will be removed in a future major release. Please * utilize the `$apiEndpoint` option instead. * @type string $apiEndpoint * The address""") s.replace( "**/Gapic/*GapicClient.php", r"\$transportConfig, and any \$serviceAddress", r"$transportConfig, and any `$apiEndpoint`") # V1 is GA, so remove @experimental tags s.replace( 'src/V1/**/*Client.php', r'^(\s+\*\n)?\s+\*\s@experimental\n', '') # Change the wording for the deprecation warning. s.replace( 'src/*/*_*.php', r'will be removed in the next major release', 'will be removed in a future release') # Fix class references in gapic samples for version in ['V1', 'V1beta1']: pathExpr = 'src/' + version + '/Gapic/CloudRedisGapicClient.php' types = { 'new CloudRedisClient': r'new Google\\Cloud\\Redis\\'+ version + r'\\CloudRedisClient', 'new Instance': r'new Google\\Cloud\\Redis\\' + version + r'\\Instance', '= Tier::': r'= Google\\Cloud\\Redis\\' + version + r'\\Instance\\Tier::', 'new FieldMask': r'new Google\\Protobuf\\FieldMask', 'new InputConfig': r'new Google\\Cloud\\Redis\\' + version + r'\\InputConfig', 'new OutputConfig': r'new Google\\Cloud\\Redis\\' + version + r'\\OutputConfig', '= DataProtectionMode': r'= Google\\Cloud\\Redis\\' + version + r'\\FailoverInstanceRequest\\DataProtectionMode::' } for search, replace in types.items(): s.replace( pathExpr, search, replace ) ### [START] protoc backwards compatibility fixes # roll back to private properties. s.replace( "src/**/V*/**/*.php", r"Generated from protobuf field ([^\n]{0,})\n\s{5}\*/\n\s{4}protected \$", r"""Generated from protobuf field \1 */
# prevent proto messages from being marked final s.replace( "src/**/V*/**/*.php", r"final class", r"class") # Replace "Unwrapped" with "Value" for method names. s.replace( "src/**/V*/**/*.php", r"public function ([s|g]\w{3,})Unwrapped", r"public function \1Value" ) ### [END] protoc backwards compatibility fixes # fix relative cloud.google.com links s.replace( "src/**/V*/**/*.php", r"(.{0,})\]\((/.{0,})\)", r"\1](https://cloud.google.com\2)" )
private $""")
client.py
"""Let's Encrypt client API.""" import logging import os from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.asymmetric import rsa import OpenSSL import zope.component from acme import client as acme_client from acme import jose from acme import messages import letsencrypt from letsencrypt import account from letsencrypt import auth_handler from letsencrypt import configuration from letsencrypt import constants from letsencrypt import continuity_auth from letsencrypt import crypto_util from letsencrypt import errors from letsencrypt import error_handler from letsencrypt import interfaces from letsencrypt import le_util from letsencrypt import reverter from letsencrypt import storage from letsencrypt.display import ops as display_ops from letsencrypt.display import enhancements logger = logging.getLogger(__name__) def acme_from_config_key(config, key): "Wrangle ACME client construction" # TODO: Allow for other alg types besides RS256 net = acme_client.ClientNetwork(key, verify_ssl=(not config.no_verify_ssl), user_agent=_determine_user_agent(config)) return acme_client.Client(config.server, key=key, net=net) def _determine_user_agent(config): """ Set a user_agent string in the config based on the choice of plugins. (this wasn't knowable at construction time) :returns: the client's User-Agent string :rtype: `str` """ if config.user_agent is None: ua = "LetsEncryptPythonClient/{0} ({1}) Authenticator/{2} Installer/{3}" ua = ua.format(letsencrypt.__version__, " ".join(le_util.get_os_info()), config.authenticator, config.installer) else: ua = config.user_agent return ua def register(config, account_storage, tos_cb=None): """Register new account with an ACME CA. This function takes care of generating fresh private key, registering the account, optionally accepting CA Terms of Service and finally saving the account. It should be called prior to initialization of `Client`, unless account has already been created. :param .IConfig config: Client configuration. :param .AccountStorage account_storage: Account storage where newly registered account will be saved to. Save happens only after TOS acceptance step, so any account private keys or `.RegistrationResource` will not be persisted if `tos_cb` returns ``False``. :param tos_cb: If ACME CA requires the user to accept a Terms of Service before registering account, client action is necessary. For example, a CLI tool would prompt the user acceptance. `tos_cb` must be a callable that should accept `.RegistrationResource` and return a `bool`: ``True`` iff the Terms of Service present in the contained `.Registration.terms_of_service` is accepted by the client, and ``False`` otherwise. ``tos_cb`` will be called only if the client acction is necessary, i.e. when ``terms_of_service is not None``. This argument is optional, if not supplied it will default to automatic acceptance! :raises letsencrypt.errors.Error: In case of any client problems, in particular registration failure, or unaccepted Terms of Service. :raises acme.errors.Error: In case of any protocol problems. :returns: Newly registered and saved account, as well as protocol API handle (should be used in `Client` initialization). 
:rtype: `tuple` of `.Account` and `acme.client.Client` """ # Log non-standard actions, potentially wrong API calls if account_storage.find_all(): logger.info("There are already existing accounts for %s", config.server) if config.email is None: if not config.register_unsafely_without_email: msg = ("No email was provided and " "--register-unsafely-without-email was not present.") logger.warn(msg) raise errors.Error(msg) logger.warn("Registering without email!") # Each new registration shall use a fresh new key key = jose.JWKRSA(key=jose.ComparableRSAKey( rsa.generate_private_key( public_exponent=65537, key_size=config.rsa_key_size, backend=default_backend()))) acme = acme_from_config_key(config, key) # TODO: add phone? regr = perform_registration(acme, config) if regr.terms_of_service is not None: if tos_cb is not None and not tos_cb(regr): raise errors.Error( "Registration cannot proceed without accepting " "Terms of Service.") regr = acme.agree_to_tos(regr) acc = account.Account(regr, key) account.report_new_account(acc, config) account_storage.save(acc) return acc, acme def perform_registration(acme, config): """ Actually register new account, trying repeatedly if there are email problems :param .IConfig config: Client configuration. :param acme.client.Client client: ACME client object. :returns: Registration Resource. :rtype: `acme.messages.RegistrationResource` :raises .UnexpectedUpdate: """ try: return acme.register(messages.NewRegistration.from_data(email=config.email)) except messages.Error, e: err = repr(e) if "MX record" in err or "Validation of contact mailto" in err: config.namespace.email = display_ops.get_email(more=True, invalid=True) return perform_registration(acme, config) else: raise class Client(object): """ACME protocol client. :ivar .IConfig config: Client configuration. :ivar .Account account: Account registered with `register`. :ivar .AuthHandler auth_handler: Authorizations handler that will dispatch DV and Continuity challenges to appropriate authenticators (providing `.IAuthenticator` interface). :ivar .IAuthenticator dv_auth: Prepared (`.IAuthenticator.prepare`) authenticator that can solve the `.constants.DV_CHALLENGES`. :ivar .IInstaller installer: Installer. :ivar acme.client.Client acme: Optional ACME client API handle. You might already have one from `register`. """ def __init__(self, config, account_, dv_auth, installer, acme=None): """Initialize a client.""" self.config = config self.account = account_ self.dv_auth = dv_auth self.installer = installer # Initialize ACME if account is provided if acme is None and self.account is not None: acme = acme_from_config_key(config, self.account.key) self.acme = acme # TODO: Check if self.config.enroll_autorenew is None. If # so, set it based to the default: figure out if dv_auth is # standalone (then default is False, otherwise default is True) if dv_auth is not None: cont_auth = continuity_auth.ContinuityAuthenticator(config, installer) self.auth_handler = auth_handler.AuthHandler( dv_auth, cont_auth, self.acme, self.account) else: self.auth_handler = None def _obtain_certificate(self, domains, csr): """Obtain certificate. Internal function with precondition that `domains` are consistent with identifiers present in the `csr`. :param list domains: Domain names. :param .le_util.CSR csr: DER-encoded Certificate Signing Request. The key used to generate this CSR can be different than `authkey`. :returns: `.CertificateResource` and certificate chain (as returned by `.fetch_chain`). 
:rtype: tuple """ if self.auth_handler is None: msg = ("Unable to obtain certificate because authenticator is " "not set.") logger.warning(msg) raise errors.Error(msg) if self.account.regr is None: raise errors.Error("Please register with the ACME server first.") logger.debug("CSR: %s, domains: %s", csr, domains) authzr = self.auth_handler.get_authorizations(domains) certr = self.acme.request_issuance( jose.ComparableX509(OpenSSL.crypto.load_certificate_request( OpenSSL.crypto.FILETYPE_ASN1, csr.data)), authzr) return certr, self.acme.fetch_chain(certr) def obtain_certificate_from_csr(self, csr): """Obtain certficiate from CSR. :param .le_util.CSR csr: DER-encoded Certificate Signing Request. :returns: `.CertificateResource` and certificate chain (as returned by `.fetch_chain`). :rtype: tuple """ return self._obtain_certificate( # TODO: add CN to domains? crypto_util.get_sans_from_csr( csr.data, OpenSSL.crypto.FILETYPE_ASN1), csr) def obtain_certificate(self, domains): """Obtains a certificate from the ACME server. `.register` must be called before `.obtain_certificate` :param set domains: domains to get a certificate :returns: `.CertificateResource`, certificate chain (as returned by `.fetch_chain`), and newly generated private key (`.le_util.Key`) and DER-encoded Certificate Signing Request (`.le_util.CSR`). :rtype: tuple """ # Create CSR from names key = crypto_util.init_save_key( self.config.rsa_key_size, self.config.key_dir) csr = crypto_util.init_save_csr(key, domains, self.config.csr_dir) return self._obtain_certificate(domains, csr) + (key, csr) def obtain_and_enroll_certificate(self, domains): """Obtain and enroll certificate. Get a new certificate for the specified domains using the specified authenticator and installer, and then create a new renewable lineage containing it. :param list domains: Domains to request. :param plugins: A PluginsFactory object. :returns: A new :class:`letsencrypt.storage.RenewableCert` instance referred to the enrolled cert lineage, or False if the cert could not be obtained. """ certr, chain, key, _ = self.obtain_certificate(domains) # XXX: We clearly need a more general and correct way of getting # options into the configobj for the RenewableCert instance. # This is a quick-and-dirty way to do it to allow integration # testing to start. (Note that the config parameter to new_lineage # ideally should be a ConfigObj, but in this case a dict will be # accepted in practice.) params = vars(self.config.namespace) config = {} cli_config = configuration.RenewerConfiguration(self.config.namespace) if (cli_config.config_dir != constants.CLI_DEFAULTS["config_dir"] or cli_config.work_dir != constants.CLI_DEFAULTS["work_dir"]): logger.warning( "Non-standard path(s), might not work with crontab installed " "by your operating system package manager") lineage = storage.RenewableCert.new_lineage( domains[0], OpenSSL.crypto.dump_certificate( OpenSSL.crypto.FILETYPE_PEM, certr.body), key.pem, crypto_util.dump_pyopenssl_chain(chain), params, config, cli_config) return lineage def save_certificate(self, certr, chain_cert, cert_path, chain_path, fullchain_path): """Saves the certificate received from the ACME server. :param certr: ACME "certificate" resource. :type certr: :class:`acme.messages.Certificate` :param list chain_cert: :param str cert_path: Candidate path to a certificate. :param str chain_path: Candidate path to a certificate chain. :param str fullchain_path: Candidate path to a full cert chain. 
:returns: cert_path, chain_path, and fullchain_path as absolute paths to the actual files :rtype: `tuple` of `str` :raises IOError: If unable to find room to write the cert files """ for path in cert_path, chain_path, fullchain_path: le_util.make_or_verify_dir( os.path.dirname(path), 0o755, os.geteuid(), self.config.strict_permissions) cert_pem = OpenSSL.crypto.dump_certificate( OpenSSL.crypto.FILETYPE_PEM, certr.body) cert_file, act_cert_path = le_util.unique_file(cert_path, 0o644) try: cert_file.write(cert_pem) finally: cert_file.close() logger.info("Server issued certificate; certificate written to %s", act_cert_path) cert_chain_abspath = None fullchain_abspath = None if chain_cert: chain_pem = crypto_util.dump_pyopenssl_chain(chain_cert) cert_chain_abspath = _save_chain(chain_pem, chain_path) fullchain_abspath = _save_chain(cert_pem + chain_pem, fullchain_path) return os.path.abspath(act_cert_path), cert_chain_abspath, fullchain_abspath def deploy_certificate(self, domains, privkey_path, cert_path, chain_path, fullchain_path): """Install certificate :param list domains: list of domains to install the certificate :param str privkey_path: path to certificate private key :param str cert_path: certificate file path (optional) :param str chain_path: chain file path """ if self.installer is None: logger.warning("No installer specified, client is unable to deploy" "the certificate") raise errors.Error("No installer available") chain_path = None if chain_path is None else os.path.abspath(chain_path) with error_handler.ErrorHandler(self.installer.recovery_routine): for dom in domains: self.installer.deploy_cert( domain=dom, cert_path=os.path.abspath(cert_path), key_path=os.path.abspath(privkey_path), chain_path=chain_path, fullchain_path=fullchain_path) self.installer.save() # needed by the Apache plugin self.installer.save("Deployed Let's Encrypt Certificate") msg = ("We were unable to install your certificate, " "however, we successfully restored your " "server to its prior configuration.") with error_handler.ErrorHandler(self._rollback_and_restart, msg): # sites may have been enabled / final cleanup self.installer.restart() def enhance_config(self, domains, config): """Enhance the configuration. :param list domains: list of domains to configure :ivar config: Namespace typically produced by :meth:`argparse.ArgumentParser.parse_args`. it must have the redirect, hsts and uir attributes. :type namespace: :class:`argparse.Namespace` :raises .errors.Error: if no installer is specified in the client. """ if self.installer is None: logger.warning("No installer is specified, there isn't any " "configuration to enhance.") raise errors.Error("No installer available") if config is None: logger.warning("No config is specified.") raise errors.Error("No config available") redirect = config.redirect hsts = config.hsts uir = config.uir # Upgrade Insecure Requests if redirect is None: redirect = enhancements.ask("redirect") if redirect: self.apply_enhancement(domains, "redirect") if hsts: self.apply_enhancement(domains, "ensure-http-header", "Strict-Transport-Security") if uir: self.apply_enhancement(domains, "ensure-http-header", "Upgrade-Insecure-Requests") msg = ("We were unable to restart web server") if redirect or hsts or uir: with error_handler.ErrorHandler(self._rollback_and_restart, msg): self.installer.restart() def apply_enhancement(self, domains, enhancement, options=None): """Applies an enhacement on all domains. 
:param domains: list of ssl_vhosts :type list of str :param enhancement: name of enhancement, e.g. ensure-http-header :type str .. note:: when more options are need make options a list. :param options: options to enhancement, e.g. Strict-Transport-Security :type str :raises .errors.PluginError: If Enhancement is not supported, or if there is any other problem with the enhancement. """ msg = ("We were unable to set up enhancement %s for your server, " "however, we successfully installed your certificate." % (enhancement)) with error_handler.ErrorHandler(self._recovery_routine_with_msg, msg): for dom in domains: try: self.installer.enhance(dom, enhancement, options) except errors.PluginEnhancementAlreadyPresent: logger.warn("Enhancement %s was already set.", enhancement) except errors.PluginError: logger.warn("Unable to set enhancement %s for %s", enhancement, dom) raise self.installer.save("Add enhancement %s" % (enhancement)) def _recovery_routine_with_msg(self, success_msg): """Calls the installer's recovery routine and prints success_msg :param str success_msg: message to show on successful recovery """ self.installer.recovery_routine() reporter = zope.component.getUtility(interfaces.IReporter) reporter.add_message(success_msg, reporter.HIGH_PRIORITY) def _rollback_and_restart(self, success_msg): """Rollback the most recent checkpoint and restart the webserver :param str success_msg: message to show on successful rollback """ logger.critical("Rolling back to previous server configuration...") reporter = zope.component.getUtility(interfaces.IReporter) try: self.installer.rollback_checkpoints() self.installer.restart() except: # TODO: suggest letshelp-letsencypt here reporter.add_message( "An error occurred and we failed to restore your config and " "restart your server. Please submit a bug report to " "https://github.com/letsencrypt/letsencrypt", reporter.HIGH_PRIORITY) raise reporter.add_message(success_msg, reporter.HIGH_PRIORITY) def validate_key_csr(privkey, csr=None): """Validate Key and CSR files. Verifies that the client key and csr arguments are valid and correspond to one another. This does not currently check the names in the CSR due to the inability to read SANs from CSRs in python crypto libraries. If csr is left as None, only the key will be validated. :param privkey: Key associated with CSR :type privkey: :class:`letsencrypt.le_util.Key` :param .le_util.CSR csr: CSR :raises .errors.Error: when validation fails """ # TODO: Handle all of these problems appropriately # The client can eventually do things like prompt the user # and allow the user to take more appropriate actions # Key must be readable and valid. if privkey.pem and not crypto_util.valid_privkey(privkey.pem): raise errors.Error("The provided key is not a valid key") if csr: if csr.form == "der": csr_obj = OpenSSL.crypto.load_certificate_request( OpenSSL.crypto.FILETYPE_ASN1, csr.data) csr = le_util.CSR(csr.file, OpenSSL.crypto.dump_certificate( OpenSSL.crypto.FILETYPE_PEM, csr_obj), "pem") # If CSR is provided, it must be readable and valid. if csr.data and not crypto_util.valid_csr(csr.data): raise errors.Error("The provided CSR is not a valid CSR") # If both CSR and key are provided, the key must be the same key used # in the CSR. if csr.data and privkey.pem: if not crypto_util.csr_matches_pubkey( csr.data, privkey.pem): raise errors.Error("The key and CSR do not match") def rollback(default_installer, checkpoints, config, plugins): """Revert configuration the specified number of checkpoints. 
:param int checkpoints: Number of checkpoints to revert. :param config: Configuration. :type config: :class:`letsencrypt.interfaces.IConfig` """ # Misconfigurations are only a slight problems... allow the user to rollback installer = display_ops.pick_installer( config, default_installer, plugins, question="Which installer " "should be used for rollback?") # No Errors occurred during init... proceed normally # If installer is None... couldn't find an installer... there shouldn't be # anything to rollback if installer is not None: installer.rollback_checkpoints(checkpoints) installer.restart() def view_config_changes(config):
def _save_chain(chain_pem, chain_path): """Saves chain_pem at a unique path based on chain_path. :param str chain_pem: certificate chain in PEM format :param str chain_path: candidate path for the cert chain :returns: absolute path to saved cert chain :rtype: str """ chain_file, act_chain_path = le_util.unique_file(chain_path, 0o644) try: chain_file.write(chain_pem) finally: chain_file.close() logger.info("Cert chain written to %s", act_chain_path) # This expects a valid chain file return os.path.abspath(act_chain_path)
"""View checkpoints and associated configuration changes. .. note:: This assumes that the installation is using a Reverter object. :param config: Configuration. :type config: :class:`letsencrypt.interfaces.IConfig` """ rev = reverter.Reverter(config) rev.recovery_routine() rev.view_config_changes()
coins.test.ts
/* eslint-disable node/no-unpublished-import */ import test from 'ava'; import {match, rest, strike} from '../../../src'; import {Coin, CoinCondition, CoinRarity, CoinType} from './types'; /** * @file e2e test using `strike`, constant value matching, and `rest` */ function appraiseCoin(coin: Coin) { const conditionFactor = strike( coin.condish, match(CoinCondition.Mint, 5), rest(1) ); const yearFactor = 2020 - coin.year; const rarityFactor = strike( coin, match({rarity: CoinRarity.Rare}, 25), match({rarity: CoinRarity.Uncommon}, 1), match({rarity: CoinRarity.Legendary}, 50), rest(0) ); const baseValue = strike(
match({type: CoinType.Nickel}, 0.05), match({type: CoinType.Quarter}, 0.25), rest(0) ); return ( baseValue * rarityFactor + baseValue * yearFactor + baseValue * conditionFactor ); } test('e2e(coins): appraiseCoin should properly appraise old ass quarter', t => { // arrange const oldAssQuarter: Coin = { year: 1764, type: CoinType.Quarter, condish: CoinCondition.Mint, rarity: CoinRarity.Legendary, }; // act const value = appraiseCoin(oldAssQuarter); // assert t.is(value, 77.75); }); test('e2e(coins): appraiseCoin should properly appraise new ass penny', t => { // arrange const newAssPenny: Coin = { year: 2020, type: CoinType.Penny, condish: CoinCondition.Good, rarity: CoinRarity.Common, }; // act const value = appraiseCoin(newAssPenny); // assert t.is(value, 0.01); });
coin, match({type: CoinType.Dime}, 0.1), match({type: CoinType.Penny}, 0.01),
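The expected value of 77.75 in the first test falls straight out of the appraisal formula above: base 0.25 for a Quarter, rarity 50 for Legendary, year factor 2020 - 1764 = 256, and condition 5 for Mint. A standalone arithmetic check, written in Rust purely as an illustration (the function and names are hypothetical, not part of this test suite):

```rust
// Illustrative re-check of the "old ass quarter" appraisal arithmetic:
// base 0.25 (Quarter), rarity 50 (Legendary), year 2020 - 1764 = 256, condition 5 (Mint).
fn appraise(base: f64, rarity: f64, year_factor: f64, condition: f64) -> f64 {
    base * rarity + base * year_factor + base * condition
}

fn main() {
    let value = appraise(0.25, 50.0, (2020 - 1764) as f64, 5.0);
    assert_eq!(value, 77.75); // matches the expectation in the ava test
    println!("{value}");
}
```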
spi.rs
// Copyright lowRISC contributors. // Licensed under the Apache License, Version 2.0, see LICENSE for details. // SPDX-License-Identifier: Apache-2.0 use anyhow::{ensure, Result}; use rusb::{Direction, Recipient, RequestType}; use std::mem::size_of; use std::rc::Rc; use zerocopy::{AsBytes, FromBytes}; use crate::io::spi::{SpiError, Target, Transfer, TransferMode}; use crate::transport::hyperdebug::{BulkInterface, Inner}; use crate::transport::TransportError; pub struct HyperdebugSpiTarget { inner: Rc<Inner>, interface: BulkInterface, _target_idx: u8, max_chunk_size: usize, } const USB_SPI_PKT_ID_CMD_GET_USB_SPI_CONFIG: u16 = 0; const USB_SPI_PKT_ID_RSP_USB_SPI_CONFIG: u16 = 1; const USB_SPI_PKT_ID_CMD_TRANSFER_START: u16 = 2; const USB_SPI_PKT_ID_CMD_TRANSFER_CONTINUE: u16 = 3; //const USB_SPI_PKT_ID_CMD_RESTART_RESPONSE: u16 = 4; const USB_SPI_PKT_ID_RSP_TRANSFER_START: u16 = 5; const USB_SPI_PKT_ID_RSP_TRANSFER_CONTINUE: u16 = 6; //const USB_SPI_REQ_DISABLE: u8 = 1; const USB_SPI_REQ_ENABLE: u8 = 0; const USB_MAX_SIZE: usize = 64; const FULL_DUPLEX: usize = 65535; #[derive(AsBytes, FromBytes, Debug, Default)] #[repr(C)] struct RspUsbSpiConfig { packet_id: u16, max_write_chunk: u16, max_read_chunk: u16, feature_bitmap: u16, } #[derive(AsBytes, FromBytes, Debug)] #[repr(C)] struct CmdTransferStart { packet_id: u16, write_count: u16, read_count: u16, data: [u8; USB_MAX_SIZE - 6], } impl CmdTransferStart { fn new() -> Self { Self { packet_id: USB_SPI_PKT_ID_CMD_TRANSFER_START, write_count: 0, read_count: 0, data: [0; USB_MAX_SIZE - 6], } } } #[derive(AsBytes, FromBytes, Debug)] #[repr(C)] struct CmdTransferContinue { packet_id: u16, data_index: u16, data: [u8; USB_MAX_SIZE - 4], } impl CmdTransferContinue { fn new() -> Self { Self { packet_id: USB_SPI_PKT_ID_CMD_TRANSFER_CONTINUE, data_index: 0, data: [0; USB_MAX_SIZE - 4], } } } #[derive(AsBytes, FromBytes, Debug)] #[repr(C)] struct
{ packet_id: u16, status_code: u16, data: [u8; USB_MAX_SIZE - 4], } impl RspTransferStart { fn new() -> Self { Self { packet_id: 0, status_code: 0, data: [0; USB_MAX_SIZE - 4], } } } #[derive(AsBytes, FromBytes, Debug)] #[repr(C)] struct RspTransferContinue { packet_id: u16, data_index: u16, data: [u8; USB_MAX_SIZE - 4], } impl RspTransferContinue { fn new() -> Self { Self { packet_id: 0, data_index: 0, data: [0; USB_MAX_SIZE - 4], } } } impl HyperdebugSpiTarget { pub fn open(inner: &Rc<Inner>, spi_interface: &BulkInterface, idx: u8) -> Result<Self> { let mut usb_handle = inner.usb_device.borrow_mut(); // Tell HyperDebug to enable SPI bridge. usb_handle.write_control( rusb::request_type(Direction::Out, RequestType::Vendor, Recipient::Interface), USB_SPI_REQ_ENABLE, 0, /* wValue */ spi_interface.interface as u16, &mut [], )?; // Exclusively claim SPI interface, preparing for bulk transfers. usb_handle.claim_interface(spi_interface.interface)?; // Initial bulk request/response to query capabilities. usb_handle.write_bulk( spi_interface.out_endpoint, &USB_SPI_PKT_ID_CMD_GET_USB_SPI_CONFIG.to_le_bytes(), )?; let mut resp: RspUsbSpiConfig = Default::default(); let rc = usb_handle.read_bulk(spi_interface.in_endpoint, resp.as_bytes_mut())?; ensure!( rc == size_of::<RspUsbSpiConfig>(), TransportError::CommunicationError( "Unrecognized reponse to GET_USB_SPI_CONFIG".to_string() ) ); ensure!( resp.packet_id == USB_SPI_PKT_ID_RSP_USB_SPI_CONFIG, TransportError::CommunicationError( "Unrecognized reponse to GET_USB_SPI_CONFIG".to_string() ) ); // Verify that interface supports concurrent read/write. ensure!( (resp.feature_bitmap & 0x0001) != 0, TransportError::CommunicationError( "HyperDebug does not support bidirectional SPI".to_string() ) ); Ok(Self { inner: Rc::clone(&inner), interface: *spi_interface, _target_idx: idx, max_chunk_size: std::cmp::min(resp.max_write_chunk, resp.max_read_chunk) as usize, }) } /// Transmit data for a single SPI operation, using one or more USB packets. fn transmit(&self, wbuf: &[u8], rbuf_len: usize) -> Result<()> { let mut req = CmdTransferStart::new(); req.write_count = wbuf.len() as u16; req.read_count = rbuf_len as u16; let databytes = std::cmp::min(USB_MAX_SIZE - 6, wbuf.len()); req.data[0..databytes].clone_from_slice(&wbuf[0..databytes]); self.usb_write_bulk(&req.as_bytes()[0..6 + databytes])?; let mut index = databytes; while index < wbuf.len() { let mut req = CmdTransferContinue::new(); req.data_index = index as u16; let databytes = std::cmp::min(USB_MAX_SIZE - 4, wbuf.len() - index); req.data[0..databytes].clone_from_slice(&wbuf[index..index + databytes]); self.usb_write_bulk(&req.as_bytes()[0..4 + databytes])?; index += databytes; } Ok(()) } /// Receive data for a single SPI operation, using one or more USB packets. 
fn receive(&self, rbuf: &mut [u8]) -> Result<()> { let mut resp = RspTransferStart::new(); let bytecount = self.usb_read_bulk(&mut resp.as_bytes_mut())?; ensure!( bytecount >= 4, TransportError::CommunicationError( "Unrecognized reponse to TRANSFER_START".to_string() ) ); ensure!( resp.packet_id == USB_SPI_PKT_ID_RSP_TRANSFER_START, TransportError::CommunicationError( "Unrecognized reponse to TRANSFER_START".to_string() ) ); ensure!( resp.status_code == 0, TransportError::CommunicationError("SPI error".to_string()) ); let databytes = bytecount - 4; rbuf[0..databytes].clone_from_slice(&resp.data[0..databytes]); let mut index = databytes; while index < rbuf.len() { let mut resp = RspTransferContinue::new(); let bytecount = self.usb_read_bulk(&mut resp.as_bytes_mut())?; ensure!( bytecount > 4, TransportError::CommunicationError( "Unrecognized reponse to TRANSFER_START".to_string() ) ); ensure!( resp.packet_id == USB_SPI_PKT_ID_RSP_TRANSFER_CONTINUE, TransportError::CommunicationError( "Unrecognized reponse to TRANSFER_START".to_string() ) ); ensure!( resp.data_index == index as u16, TransportError::CommunicationError( "Unexpected byte index in reponse to TRANSFER_START".to_string() ) ); let databytes = bytecount - 4; rbuf[index..index + databytes].clone_from_slice(&resp.data[0..0 + databytes]); index += databytes; } Ok(()) } /// Send one USB packet. fn usb_write_bulk(&self, buf: &[u8]) -> Result<()> { self.inner .usb_device .borrow() .write_bulk(self.interface.out_endpoint, buf)?; Ok(()) } /// Receive one USB packet. fn usb_read_bulk(&self, buf: &mut [u8]) -> Result<usize> { Ok(self .inner .usb_device .borrow() .read_bulk(self.interface.in_endpoint, buf)?) } } impl Target for HyperdebugSpiTarget { fn get_transfer_mode(&self) -> Result<TransferMode> { Ok(TransferMode::Mode0) } fn set_transfer_mode(&self, _mode: TransferMode) -> Result<()> { todo!(); } fn get_bits_per_word(&self) -> Result<u32> { Ok(8) } fn set_bits_per_word(&self, bits_per_word: u32) -> Result<()> { match bits_per_word { 8 => Ok(()), _ => Err(SpiError::InvalidWordSize(bits_per_word).into()), } } fn get_max_speed(&self) -> Result<u32> { todo!(); } fn set_max_speed(&self, _frequency: u32) -> Result<()> { log::info!("Setting of SPI speed not implemented for HyperDebug, ignoring\n",); Ok(()) } fn get_max_transfer_count(&self) -> Result<usize> { // The protocol imposes no limits to the number of Transfers // in a transaction. Ok(usize::MAX) } fn max_chunk_size(&self) -> Result<usize> { Ok(self.max_chunk_size) } fn run_transaction(&self, transaction: &mut [Transfer]) -> Result<()> { let mut idx: usize = 0; while idx < transaction.len() { match &mut transaction[idx..] { [Transfer::Write(wbuf), Transfer::Read(rbuf), ..] => { // Hyperdebug can do SPI write followed by SPI read as a single USB // request/reply. Take advantage of that by detecting pairs of // Transfer::Write followed by Transfer::Read. ensure!( wbuf.len() <= self.max_chunk_size, SpiError::InvalidDataLength(wbuf.len()) ); ensure!( rbuf.len() <= self.max_chunk_size, SpiError::InvalidDataLength(rbuf.len()) ); self.transmit(wbuf, rbuf.len())?; self.receive(rbuf)?; // Skip two steps ahead, as two items were processed. idx += 2; continue; } [Transfer::Write(wbuf), ..] => { ensure!( wbuf.len() <= self.max_chunk_size, SpiError::InvalidDataLength(wbuf.len()) ); self.transmit(wbuf, 0)?; self.receive(&mut [])?; } [Transfer::Read(rbuf), ..] 
=> { ensure!( rbuf.len() <= self.max_chunk_size, SpiError::InvalidDataLength(rbuf.len()) ); self.transmit(&[], rbuf.len())?; self.receive(rbuf)?; } [Transfer::Both(wbuf, rbuf), ..] => { ensure!( rbuf.len() == wbuf.len(), SpiError::MismatchedDataLength(wbuf.len(), rbuf.len()) ); ensure!( wbuf.len() <= self.max_chunk_size, SpiError::InvalidDataLength(wbuf.len()) ); self.transmit(wbuf, FULL_DUPLEX)?; self.receive(rbuf)?; } [] => (), } idx += 1; } Ok(()) } }
RspTransferStart
lib.rs
#[derive(PartialEq, Eq)] pub enum Shape { Equilateral, Isosceles, Scalene, } pub struct
; impl Triangle { pub fn build(sides: [u32; 3]) -> Result<Shape, ()> { let a = sides[0]; let b = sides[1]; let c = sides[2]; if a + b > c && b + c > a && c + a > b { if a == b && b == c { Ok(Shape::Equilateral) } else if a == b || b == c || c == a { Ok(Shape::Isosceles) } else { Ok(Shape::Scalene) } } else { Err(()) } } } impl Shape { pub fn is_equilateral(&self) -> bool { self == &Shape::Equilateral } pub fn is_isosceles(&self) -> bool { self == &Shape::Isosceles } pub fn is_scalene(&self) -> bool { self == &Shape::Scalene } }
Triangle
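The missing identifier above is the unit struct `Triangle`, whose `build` first enforces the triangle inequality and then classifies by how many sides are equal. A minimal standalone sketch of that same decision order, assuming nothing from the exercise crate (the names here are illustrative only):

```rust
// Same decision order as Triangle::build above: reject sides that violate the
// triangle inequality first, then classify equilateral / isosceles / scalene.
fn classify(a: u32, b: u32, c: u32) -> Option<&'static str> {
    if a + b <= c || b + c <= a || c + a <= b {
        return None; // not a valid triangle
    }
    Some(if a == b && b == c {
        "equilateral"
    } else if a == b || b == c || c == a {
        "isosceles"
    } else {
        "scalene"
    })
}

fn main() {
    assert_eq!(classify(2, 2, 2), Some("equilateral"));
    assert_eq!(classify(3, 4, 5), Some("scalene"));
    assert_eq!(classify(1, 1, 3), None);
}
```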
text_out_3.rs
#[doc = "Register `TEXT_OUT_3` reader"] pub struct R(crate::R<TEXT_OUT_3_SPEC>); impl core::ops::Deref for R { type Target = crate::R<TEXT_OUT_3_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<TEXT_OUT_3_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<TEXT_OUT_3_SPEC>) -> Self { R(reader) } }
#[doc = "Register `TEXT_OUT_3` writer"] pub struct W(crate::W<TEXT_OUT_3_SPEC>); impl core::ops::Deref for W { type Target = crate::W<TEXT_OUT_3_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<crate::W<TEXT_OUT_3_SPEC>> for W { #[inline(always)] fn from(writer: crate::W<TEXT_OUT_3_SPEC>) -> Self { W(writer) } } #[doc = "Field `TEXT_OUT_3` reader - This bits stores text_out_3 that is a part of result text material."] pub struct TEXT_OUT_3_R(crate::FieldReader<u32, u32>); impl TEXT_OUT_3_R { #[inline(always)] pub(crate) fn new(bits: u32) -> Self { TEXT_OUT_3_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for TEXT_OUT_3_R { type Target = crate::FieldReader<u32, u32>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `TEXT_OUT_3` writer - This bits stores text_out_3 that is a part of result text material."] pub struct TEXT_OUT_3_W<'a> { w: &'a mut W, } impl<'a> TEXT_OUT_3_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u32) -> &'a mut W { self.w.bits = value as u32; self.w } } impl R { #[doc = "Bits 0:31 - This bits stores text_out_3 that is a part of result text material."] #[inline(always)] pub fn text_out_3(&self) -> TEXT_OUT_3_R { TEXT_OUT_3_R::new(self.bits as u32) } } impl W { #[doc = "Bits 0:31 - This bits stores text_out_3 that is a part of result text material."] #[inline(always)] pub fn text_out_3(&mut self) -> TEXT_OUT_3_W { TEXT_OUT_3_W { w: self } } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "result text material text_out_3 configure register\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [text_out_3](index.html) module"] pub struct TEXT_OUT_3_SPEC; impl crate::RegisterSpec for TEXT_OUT_3_SPEC { type Ux = u32; } #[doc = "`read()` method returns [text_out_3::R](R) reader structure"] impl crate::Readable for TEXT_OUT_3_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [text_out_3::W](W) writer structure"] impl crate::Writable for TEXT_OUT_3_SPEC { type Writer = W; } #[doc = "`reset()` method sets TEXT_OUT_3 to value 0"] impl crate::Resettable for TEXT_OUT_3_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
useDataStatus.ts
import {useEffect, useState} from "react"; import {DataResponse} from "../models"; export function useDataStatus<T>( defaultValue: T, defaultStatus?: {loading: boolean; error?: string} ): [ T, {loading: boolean; error?: string}, (data: Promise<DataResponse<T>>) => Promise<void> ] { const [data, setDataInfo] = useState<T>(defaultValue); const [status, setStatus] = useState<{loading: boolean; error?: string}>( defaultStatus ? defaultStatus : {loading: true} );
setStatus({loading: true}); const response = await data; if (response.success === true) { setStatus({loading: false}); setDataInfo(response.response); return; } setStatus({loading: false, error: "Server gặp sự cố"}); } catch (err) { console.log(err); setStatus({loading: false, error: "Server gặp sự cố"}); } }; return [data, status, getData]; }
const getData = async (data: Promise<DataResponse<T>>) => { try {
example_mariokart.py
from nintendo.nex import backend, authentication, ranking, datastore from nintendo.games import MK8 from nintendo import account import requests import logging logging.basicConfig(level=logging.INFO) #Device id can be retrieved with a call to MCP_GetDeviceId on the Wii U #Serial number can be found on the back of the Wii U DEVICE_ID = 12345678 SERIAL_NUMBER = "..." SYSTEM_VERSION = 0x220 REGION_ID = 4 COUNTRY_ID = 94 REGION_NAME = "EUR" COUNTRY_NAME = "NL" USERNAME = "..." #Nintendo network id PASSWORD = "..." #Nintendo network password TRACK_ID = 27 #Mario Kart Stadium api = account.AccountAPI() api.set_device(DEVICE_ID, SERIAL_NUMBER, SYSTEM_VERSION, REGION_ID, COUNTRY_NAME) api.set_title(MK8.TITLE_ID_EUR, MK8.LATEST_VERSION) api.login(USERNAME, PASSWORD) nex_token = api.get_nex_token(MK8.GAME_SERVER_ID) backend = backend.BackEndClient(MK8.ACCESS_KEY, MK8.NEX_VERSION) backend.connect(nex_token.host, nex_token.port) backend.login(nex_token.username, nex_token.password) ranking_client = ranking.RankingClient(backend.secure_client) order_param = ranking.RankingOrderParam() order_param.order_calc = ranking.RankingOrderCalc.ORDINAL order_param.offset = 499 #Start at 500th place order_param.count = 20 #Download 20 highscores rankings = ranking_client.get_ranking( ranking.RankingMode.GLOBAL, TRACK_ID, order_param, 0, 0 ) stats = ranking_client.get_stats( TRACK_ID, order_param, ranking.RankingStatFlags.ALL ).stats def
(score): millisec = score % 1000 seconds = score // 1000 % 60 minutes = score // 1000 // 60 return "%i:%02i.%03i" %(minutes, seconds, millisec) names = api.get_nnids([data.pid for data in rankings.datas]) #Print some interesting stats print("Total:", int(stats[0])) print("Total time:", format_time(stats[1])) print("Average time:", format_time(stats[2])) print("Lowest time:", format_time(stats[3])) print("Highest time:", format_time(stats[4])) print("Rankings:") for rankdata in rankings.datas: time = format_time(rankdata.score) print("\t%5i %20s %s" %(rankdata.rank, names[rankdata.pid], time)) #Let's download the replay file of whoever is in 500th place store = datastore.DataStoreClient(backend.secure_client) rankdata = rankings.datas[0] get_param = datastore.DataStorePrepareGetParam() get_param.persistence_target.owner_id = rankdata.pid get_param.persistence_target.persistence_id = TRACK_ID - 16 get_param.extra_data = ["WUP", str(REGION_ID), REGION_NAME, str(COUNTRY_ID), COUNTRY_NAME, ""] req_info = store.prepare_get_object(get_param) headers = {header.key: header.value for header in req_info.headers} replay_data = requests.get("http://" + req_info.url, headers=headers).content with open("replay.bin", "wb") as f: f.write(replay_data) #Close connection backend.close()
format_time
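The blank above is `format_time`, which splits a millisecond score into minutes, seconds, and milliseconds before formatting it as `M:SS.mmm`. The same split, sketched standalone in Rust for clarity (illustrative only; the script itself is Python):

```rust
// Mirrors the Python completion: millisec = score % 1000,
// seconds = score / 1000 % 60, minutes = score / 1000 / 60.
fn format_time(score: u64) -> String {
    let millisec = score % 1000;
    let seconds = score / 1000 % 60;
    let minutes = score / 1000 / 60;
    format!("{}:{:02}.{:03}", minutes, seconds, millisec)
}

fn main() {
    assert_eq!(format_time(95_123), "1:35.123"); // 95.123 seconds
    assert_eq!(format_time(5_009), "0:05.009");
}
```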
import-glob-circular.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // error-pattern: unresolved mod circ1 { #[legacy_exports]; use circ1::*; export f1; export f2; export common; fn f1()
fn common() -> uint { return 0u; } } mod circ2 { #[legacy_exports]; use circ2::*; export f1; export f2; export common; fn f2() { debug!("f2"); } fn common() -> uint { return 1u; } } mod test { #[legacy_exports]; use circ1::*; fn test() { f1066(); } }
{ debug!("f1"); }
run.go
package cmd import ( "bytes" "fmt" "io/ioutil" "os" "os/exec" "github.com/tombuildsstuff/golang-iis/iis/helpers" ) func (c Client) Run(commands string) (*string, *string, error) { rInt := helpers.RandomInt() filename := fmt.Sprintf("command-%d.ps1", rInt) err := ioutil.WriteFile(filename, []byte(commands), os.FileMode(0700)) if err != nil
var stderr bytes.Buffer var stdout bytes.Buffer // TODO: we could remove the need for a file by running these commands via WinRM, maybe? cmd := exec.Command("powershell.exe", "-ExecutionPolicy", "Bypass", "-NoLogo", "-NonInteractive", "-NoProfile", "-File", filename) cmd.Stderr = &stderr cmd.Stdout = &stdout defer os.Remove(filename) if err := cmd.Start(); err != nil { return nil, nil, fmt.Errorf("Error starting: %+v", err) } if err := cmd.Wait(); err != nil { return nil, nil, fmt.Errorf("Error waiting: %+v", err) } stdOutStr := stdout.String() stdErrStr := stderr.String() return &stdOutStr, &stdErrStr, nil }
{ return nil, nil, fmt.Errorf("Error writing command file: %+v", err) }
handler.py
import logging from django.contrib.auth.models import User from unplugged import RelatedPluginField, Schema, fields
logger = logging.getLogger(__name__) class MultiNotifierSchema(Schema): notifiers = fields.List( RelatedPluginField(plugin_type=NotifierPlugin), many=True, default=list ) class MultiNotifierNotifierHandlerPlugin(NotifierPlugin): plugin_name = "multinotifier" config_schema = MultiNotifierSchema def notify(self, notification): for notifier in self.config.get("notifiers", []): notifier.notify(notification)
from wampyre.realm import realm_manager from ...plugins import NotifierPlugin
mvcc.go
// Copyright 2019-present PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package mvcc import ( "encoding/binary" "github.com/pingcap/kvproto/pkg/kvrpcpb" "unsafe" "github.com/pingcap/tidb/util/codec" ) var defaultEndian = binary.LittleEndian // DBUserMeta is the user meta used in DB. type DBUserMeta []byte const dbUserMetaLen = 16 // DecodeLock decodes data to lock, the primary and value is copied, the secondaries are copied if async commit is enabled. func DecodeLock(data []byte) (l MvccLock) { l.MvccLockHdr = *(*MvccLockHdr)(unsafe.Pointer(&data[0])) cursor := mvccLockHdrSize lockBuf := append([]byte{}, data[cursor:]...) l.Primary = lockBuf[:l.PrimaryLen] cursor = int(l.PrimaryLen) if l.MvccLockHdr.SecondaryNum > 0 { l.Secondaries = make([][]byte, l.MvccLockHdr.SecondaryNum) for i := uint32(0); i < l.MvccLockHdr.SecondaryNum; i++ { keyLen := binary.LittleEndian.Uint16(lockBuf[cursor:]) cursor += 2 l.Secondaries[i] = lockBuf[cursor : cursor+int(keyLen)] cursor += int(keyLen) } } l.Value = lockBuf[cursor:] return } // MvccLockHdr holds fixed size fields for MvccLock. type MvccLockHdr struct { StartTS uint64 ForUpdateTS uint64 MinCommitTS uint64 TTL uint32 Op uint8 HasOldVer bool PrimaryLen uint16 UseAsyncCommit bool SecondaryNum uint32 } const mvccLockHdrSize = int(unsafe.Sizeof(MvccLockHdr{})) // MvccLock is the structure for MVCC lock. type MvccLock struct { MvccLockHdr Primary []byte Value []byte Secondaries [][]byte } // MarshalBinary implements encoding.BinaryMarshaler interface. func (l *MvccLock) MarshalBinary() []byte { lockLen := mvccLockHdrSize + len(l.Primary) + len(l.Value) length := lockLen if l.MvccLockHdr.SecondaryNum > 0 { for _, secondaryKey := range l.Secondaries { length += 2 length += len(secondaryKey) } } buf := make([]byte, length) hdr := (*MvccLockHdr)(unsafe.Pointer(&buf[0])) *hdr = l.MvccLockHdr cursor := mvccLockHdrSize copy(buf[cursor:], l.Primary) cursor += len(l.Primary) if l.MvccLockHdr.SecondaryNum > 0 { for _, secondaryKey := range l.Secondaries { binary.LittleEndian.PutUint16(buf[cursor:], uint16(len(secondaryKey))) cursor += 2 copy(buf[cursor:], secondaryKey) cursor += len(secondaryKey) } } copy(buf[cursor:], l.Value) cursor += len(l.Value) return buf } // ToLockInfo converts an MvccLock to kvrpcpb.LockInfo func (l *MvccLock) ToLockInfo(key []byte) *kvrpcpb.LockInfo { return &kvrpcpb.LockInfo{ PrimaryLock: l.Primary, LockVersion: l.StartTS, Key: key, LockTtl: uint64(l.TTL), LockType: kvrpcpb.Op(l.Op), LockForUpdateTs: l.ForUpdateTS, UseAsyncCommit: l.UseAsyncCommit, MinCommitTs: l.MinCommitTS, Secondaries: l.Secondaries, } } // UserMeta value for lock. const ( LockUserMetaNoneByte = 0 LockUserMetaDeleteByte = 2 ) // UserMeta byte slices for lock. var ( LockUserMetaNone = []byte{LockUserMetaNoneByte} LockUserMetaDelete = []byte{LockUserMetaDeleteByte} ) // DecodeKeyTS decodes the TS in a key. func DecodeKeyTS(buf []byte) uint64 { tsBin := buf[len(buf)-8:] _, ts, err := codec.DecodeUintDesc(tsBin) if err != nil { panic(err) } return ts } // NewDBUserMeta creates a new DBUserMeta. func
(startTS, commitTS uint64) DBUserMeta { m := make(DBUserMeta, 16) defaultEndian.PutUint64(m, startTS) defaultEndian.PutUint64(m[8:], commitTS) return m } // CommitTS reads the commitTS from the DBUserMeta. func (m DBUserMeta) CommitTS() uint64 { return defaultEndian.Uint64(m[8:]) } // StartTS reads the startTS from the DBUserMeta. func (m DBUserMeta) StartTS() uint64 { return defaultEndian.Uint64(m[:8]) } // EncodeExtraTxnStatusKey encodes an extra transaction status key. // It is only used for Rollback and Op_Lock. func EncodeExtraTxnStatusKey(key []byte, startTS uint64) []byte { b := append([]byte{}, key...) ret := codec.EncodeUintDesc(b, startTS) ret[0]++ return ret } // DecodeExtraTxnStatusKey decodes an extra transaction status key. func DecodeExtraTxnStatusKey(extraKey []byte) (key []byte) { if len(extraKey) <= 9 { return nil } key = append([]byte{}, extraKey[:len(extraKey)-8]...) key[0]-- return }
NewDBUserMeta
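The completed `NewDBUserMeta` packs the start timestamp into bytes 0..8 and the commit timestamp into bytes 8..16, both little-endian, which is exactly what `StartTS` and `CommitTS` read back. A standalone sketch of that 16-byte layout, in Rust purely as an illustration of the encoding (not part of the Go package):

```rust
// 16-byte user meta: [start_ts LE (8 bytes) | commit_ts LE (8 bytes)],
// matching NewDBUserMeta / StartTS / CommitTS above.
fn new_user_meta(start_ts: u64, commit_ts: u64) -> [u8; 16] {
    let mut m = [0u8; 16];
    m[..8].copy_from_slice(&start_ts.to_le_bytes());
    m[8..].copy_from_slice(&commit_ts.to_le_bytes());
    m
}

fn read_u64_le(bytes: &[u8]) -> u64 {
    let mut b = [0u8; 8];
    b.copy_from_slice(bytes);
    u64::from_le_bytes(b)
}

fn main() {
    let m = new_user_meta(400_000, 400_005);
    assert_eq!(read_u64_le(&m[..8]), 400_000);  // StartTS
    assert_eq!(read_u64_le(&m[8..]), 400_005);  // CommitTS
}
```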
aaspi_slave.py
#!/bin/env python #========================================================================== # (c) 2004 Total Phase, Inc. #-------------------------------------------------------------------------- # Project : Aardvark Sample Code # File : aaspi_slave.py #-------------------------------------------------------------------------- # Configure the device as an SPI slave and watch incoming data. #-------------------------------------------------------------------------- # Redistribution and use of this file in source and binary forms, with # or without modification, are permitted. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. #========================================================================== #========================================================================== # IMPORTS #========================================================================== import sys from aardvark_py import * #========================================================================== # CONSTANTS #========================================================================== BUFFER_SIZE = 65535 SLAVE_RESP_SIZE = 26 #========================================================================== # FUNCTIONS #========================================================================== def dump (handle, timeout_ms):
#========================================================================== # MAIN PROGRAM #========================================================================== if (len(sys.argv) < 4): print "usage: aaspi_slave PORT MODE TIMEOUT_MS" print " mode 0 : pol = 0, phase = 0" print " mode 1 : pol = 0, phase = 1" print " mode 2 : pol = 1, phase = 0" print " mode 3 : pol = 1, phase = 1" print "" print " The timeout value specifies the time to" print " block until the first packet is received." print " If the timeout is -1, the program will" print " block indefinitely." sys.exit() port = int(sys.argv[1]) mode = int(sys.argv[2]) timeout_ms = int(sys.argv[3]) handle = aa_open(port) if (handle <= 0): print "Unable to open Aardvark device on port %d" % port print "Error code = %d" % handle sys.exit() # Ensure that the SPI subsystem is enabled aa_configure(handle, AA_CONFIG_SPI_I2C) # Disable the Aardvark adapter's power pins. # This command is only effective on v2.0 hardware or greater. # The power pins on the v1.02 hardware are not enabled by default. aa_target_power(handle, AA_TARGET_POWER_NONE) # Setup the clock phase aa_spi_configure(handle, mode >> 1, mode & 1, AA_SPI_BITORDER_MSB) # Set the slave response slave_resp = array('B', [ 0 for i in range(SLAVE_RESP_SIZE) ]) for i in range(SLAVE_RESP_SIZE): slave_resp[i] = ord('A') + i aa_spi_slave_set_response(handle, slave_resp) # Enable the slave aa_spi_slave_enable(handle) # Watch the SPI port dump(handle, timeout_ms) # Disable the slave and close the device aa_spi_slave_disable(handle) aa_close(handle)
print "Watching slave SPI data..." # Wait for data on bus result = aa_async_poll(handle, timeout_ms) if (result != AA_ASYNC_SPI): print "No data available." return print "" trans_num = 0 # Loop until aa_spi_slave_read times out while 1: # Read the SPI message. # This function has an internal timeout (see datasheet). # To use a variable timeout the function aa_async_poll could # be used for subsequent messages. (num_read, data_in) = aa_spi_slave_read(handle, BUFFER_SIZE) if (num_read < 0 and num_read != AA_SPI_SLAVE_TIMEOUT): print "error: %s" % aa_status_string(num_read) return elif (num_read == 0 or num_read == AA_SPI_SLAVE_TIMEOUT): print "No more data available from SPI master" return else: # Dump the data to the screen sys.stdout.write("*** Transaction #%02d\n" % trans_num) sys.stdout.write("Data read from device:") for i in range(num_read): if ((i&0x0f) == 0): sys.stdout.write("\n%04x: " % i) sys.stdout.write("%02x " % (data_in[i] & 0xff)) if (((i+1)&0x07) == 0): sys.stdout.write(" ") sys.stdout.write("\n\n") trans_num = trans_num +1
eval_test.go
// Copyright 2015 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package tree_test import ( "context" gosql "database/sql" "fmt" "path/filepath" "regexp" "strings" "testing" "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/colexec" "github.com/cockroachdb/cockroach/pkg/sql/colexec/typeconv" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/opt/exec/execbuilder" "github.com/cockroachdb/cockroach/pkg/sql/opt/optbuilder" "github.com/cockroachdb/cockroach/pkg/sql/opt/xform" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/rowexec" _ "github.com/cockroachdb/cockroach/pkg/sql/sem/builtins" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/datadriven" "github.com/stretchr/testify/require" ) func TestEval(t *testing.T) { defer leaktest.AfterTest(t)() ctx := context.Background() evalCtx := tree.NewTestingEvalContext(cluster.MakeTestingClusterSettings()) defer evalCtx.Stop(ctx) walk := func(t *testing.T, getExpr func(*testing.T, *datadriven.TestData) string) { datadriven.Walk(t, filepath.Join("testdata", "eval"), func(t *testing.T, path string) { datadriven.RunTest(t, path, func(t *testing.T, d *datadriven.TestData) string { if d.Cmd != "eval" { t.Fatalf("unsupported command %s", d.Cmd) } return getExpr(t, d) + "\n" }) }) } walkExpr := func(t *testing.T, getExpr func(tree.Expr) (tree.TypedExpr, error)) { walk(t, func(t *testing.T, d *datadriven.TestData) string { expr, err := parser.ParseExpr(d.Input) if err != nil { t.Fatalf("%s: %v", d.Input, err) } e, err := getExpr(expr) if err != nil { return fmt.Sprint(err) } r, err := e.Eval(evalCtx) if err != nil { return fmt.Sprint(err) } return r.String() }) } t.Run("opt", func(t *testing.T) { walkExpr(t, func(e tree.Expr) (tree.TypedExpr, error) { return optBuildScalar(evalCtx, e) }) }) t.Run("no-opt", func(t *testing.T) { walkExpr(t, func(e tree.Expr) (tree.TypedExpr, error) { // expr.TypeCheck to avoid constant folding. typedExpr, err := e.TypeCheck(nil, types.Any) if err != nil { return nil, err } return evalCtx.NormalizeExpr(typedExpr) }) }) // The opt and no-opt tests don't do an end-to-end SQL test. Do that // here by executing a SELECT. In order to make the output be the same // we have to also figure out what the expected output type is so we // can correctly format the datum. 
t.Run("sql", func(t *testing.T) { s, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop(ctx) walk(t, func(t *testing.T, d *datadriven.TestData) string { var res gosql.NullString if err := sqlDB.QueryRow(fmt.Sprintf("SELECT (%s)::STRING", d.Input)).Scan(&res); err != nil { return strings.TrimPrefix(err.Error(), "pq: ") } if !res.Valid { return "NULL" } // We have a non-null result. We can't just return // res.String here because these strings don't // match the datum.String() representations. For // example, a bitarray has a res.String of something // like `1001001` but the datum representation is // `B'1001001'`. Thus we have to parse res.String (a // SQL result) back into a datum and return that. expr, err := parser.ParseExpr(d.Input) if err != nil { t.Fatal(err) } // expr.TypeCheck to avoid constant folding. typedExpr, err := expr.TypeCheck(nil, types.Any) if err != nil { // An error here should have been found above by QueryRow. t.Fatal(err) } switch typedExpr.ResolvedType().Family() { case types.TupleFamily: // ParseAndRequireString doesn't handle tuples, so we have to convert them ourselves. var datums tree.Datums // Fetch the original expression's tuple values. tuple := typedExpr.(*tree.Tuple) for i, s := range strings.Split(res.String[1:len(res.String)-1], ",") { if s == "" { continue } // Figure out the type of the tuple value. expr, err := tuple.Exprs[i].TypeCheck(nil, types.Any) if err != nil { t.Fatal(err) } // Now parse the new string as the expected type. datum, err := tree.ParseAndRequireString(expr.ResolvedType(), s, evalCtx) if err != nil { t.Errorf("%s: %s", err, s) return err.Error() } datums = append(datums, datum) } return tree.NewDTuple(typedExpr.ResolvedType(), datums...).String() } datum, err := tree.ParseAndRequireString(typedExpr.ResolvedType(), res.String, evalCtx) if err != nil { t.Errorf("%s: %s", err, res.String) return err.Error() } return datum.String() }) }) t.Run("vectorized", func(t *testing.T) { walk(t, func(t *testing.T, d *datadriven.TestData) string { if d.Input == "B'11111111111111111111111110000101'::int4" { // Skip this test: https://github.com/cockroachdb/cockroach/pull/40790#issuecomment-532597294. return strings.TrimSpace(d.Expected) } flowCtx := &execinfra.FlowCtx{ EvalCtx: evalCtx, } memMonitor := execinfra.NewTestMemMonitor(ctx, cluster.MakeTestingClusterSettings()) defer memMonitor.Stop(ctx) acc := memMonitor.MakeBoundAccount() defer acc.Close(ctx) expr, err := parser.ParseExpr(d.Input) require.NoError(t, err) if _, ok := expr.(*tree.RangeCond); ok { // RangeCond gets normalized to comparison expressions and its Eval // method returns an error, so skip it for execution. return strings.TrimSpace(d.Expected) } typedExpr, err := expr.TypeCheck(nil, types.Any) if err != nil { // Skip this test as it's testing an expected error which would be // caught before execution. return strings.TrimSpace(d.Expected) } typs := []types.T{*typedExpr.ResolvedType()} // inputTyps has no relation to the actual expression result type. Used // for generating a batch. 
inputTyps := []types.T{*types.Int} inputColTyps, err := typeconv.FromColumnTypes(inputTyps) require.NoError(t, err) batchesReturned := 0 args := colexec.NewColOperatorArgs{ Spec: &execinfrapb.ProcessorSpec{ Input: []execinfrapb.InputSyncSpec{{ Type: execinfrapb.InputSyncSpec_UNORDERED, ColumnTypes: inputTyps, }}, Core: execinfrapb.ProcessorCoreUnion{ Noop: &execinfrapb.NoopCoreSpec{}, }, Post: execinfrapb.PostProcessSpec{ RenderExprs: []execinfrapb.Expression{{Expr: d.Input}}, }, }, Inputs: []colexec.Operator{ &colexec.CallbackOperator{ NextCb: func(_ context.Context) coldata.Batch { if batchesReturned > 0 { return coldata.ZeroBatch } // It doesn't matter what types we create the input batch with. batch := coldata.NewMemBatch(inputColTyps) batch.SetLength(1) batchesReturned++ return batch }, }, }, StreamingMemAccount: &acc, // Unsupported post processing specs are wrapped and run through the // row execution engine. ProcessorConstructor: rowexec.NewProcessor, } args.TestingKnobs.UseStreamingMemAccountForBuffering = true result, err := colexec.NewColOperator(ctx, flowCtx, args) if testutils.IsError(err, "unsupported type") { // Skip this test as execution is not supported by the vectorized // engine. return strings.TrimSpace(d.Expected) } else { require.NoError(t, err) } mat, err := colexec.NewMaterializer( flowCtx, 0, /* processorID */ result.Op, typs, &execinfrapb.PostProcessSpec{}, nil, /* output */ nil, /* metadataSourcesQueue */ nil, /* toClose */ nil, /* outputStatsToTrace */ nil, /* cancelFlow */ ) require.NoError(t, err) var ( row sqlbase.EncDatumRow meta *execinfrapb.ProducerMetadata ) ctx = mat.Start(ctx) row, meta = mat.Next() if meta != nil { if meta.Err != nil { return fmt.Sprint(meta.Err) } t.Fatalf("unexpected metadata: %+v", meta) } if row == nil { // Might be some metadata. 
if meta := mat.DrainHelper(); meta.Err != nil { t.Fatalf("unexpected error: %s", meta.Err) } t.Fatal("unexpected end of input") } return row[0].Datum.String() }) }) } func optBuildScalar(evalCtx *tree.EvalContext, e tree.Expr) (tree.TypedExpr, error) { var o xform.Optimizer o.Init(evalCtx, nil /* catalog */) b := optbuilder.NewScalar(context.TODO(), &tree.SemaContext{}, evalCtx, o.Factory()) b.AllowUnsupportedExpr = true if err := b.Build(e); err != nil { return nil, err } bld := execbuilder.New(nil /* factory */, o.Memo(), nil /* catalog */, o.Memo().RootExpr(), evalCtx) ivh := tree.MakeIndexedVarHelper(nil /* container */, 0) expr, err := bld.BuildScalar(&ivh) if err != nil { return nil, err } return expr, nil } func TestTimeConversion(t *testing.T) { defer leaktest.AfterTest(t)() tests := []struct { start string format string tm string revformat string reverse string }{ // %a %A %b %B (+ %Y) {`Wed Oct 05 2016`, `%a %b %d %Y`, `2016-10-05 00:00:00+00:00`, ``, ``}, {`Wednesday October 05 2016`, `%A %B %d %Y`, `2016-10-05 00:00:00+00:00`, ``, ``}, // %c {`Wed Oct 5 01:02:03 2016`, `%c`, `2016-10-05 01:02:03+00:00`, ``, ``}, // %C %d (+ %m %y) {`20 06 10 12`, `%C %y %m %d`, `2006-10-12 00:00:00+00:00`, ``, ``}, // %D {`10/12/06`, `%D`, `2006-10-12 00:00:00+00:00`, ``, ``}, // %e (+ %Y %m) {`2006 10 3`, `%Y %m %e`, `2006-10-03 00:00:00+00:00`, ``, ``}, // %f (+ %c) {`Wed Oct 5 01:02:03 2016 .123`, `%c .%f`, `2016-10-05 01:02:03.123+00:00`, `.%f`, `.123000`}, {`Wed Oct 5 01:02:03 2016 .123456`, `%c .%f`, `2016-10-05 01:02:03.123456+00:00`, `.%f`, `.123456`}, {`Wed Oct 5 01:02:03 2016 .999999`, `%c .%f`, `2016-10-05 01:02:03.999999+00:00`, `.%f`, `.999999`}, // %F {`2006-10-03`, `%F`, `2006-10-03 00:00:00+00:00`, ``, ``}, // %h (+ %Y %d) {`2006 Oct 03`, `%Y %h %d`, `2006-10-03 00:00:00+00:00`, ``, ``}, // %H (+ %S %M) {`20061012 01:03:02`, `%Y%m%d %H:%S:%M`, `2006-10-12 01:02:03+00:00`, ``, ``}, // %I (+ %Y %m %d) {`20161012 11`, `%Y%m%d %I`, `2016-10-12 11:00:00+00:00`, ``, ``}, // %j (+ %Y) {`2016 286`, `%Y %j`, `2016-10-12 00:00:00+00:00`, ``, ``}, // %k (+ %Y %m %d) {`20061012 23`, `%Y%m%d %k`, `2006-10-12 23:00:00+00:00`, ``, ``}, // %l (+ %Y %m %d %p) {`20061012 5 PM`, `%Y%m%d %l %p`, `2006-10-12 17:00:00+00:00`, ``, ``}, // %n (+ %Y %m %d) {"2006\n10\n03", `%Y%n%m%n%d`, `2006-10-03 00:00:00+00:00`, ``, ``}, // %p cannot be parsed before hour specifiers, so be sure that // they appear in this order. 
{`20161012 11 PM`, `%Y%m%d %I %p`, `2016-10-12 23:00:00+00:00`, ``, ``}, {`20161012 11 AM`, `%Y%m%d %I %p`, `2016-10-12 11:00:00+00:00`, ``, ``}, // %r {`20161012 11:02:03 PM`, `%Y%m%d %r`, `2016-10-12 23:02:03+00:00`, ``, ``}, // %R {`20161012 11:02`, `%Y%m%d %R`, `2016-10-12 11:02:00+00:00`, ``, ``}, // %s {`1491920586`, `%s`, `2017-04-11 14:23:06+00:00`, ``, ``}, // %t (+ %Y %m %d) {"2006\t10\t03", `%Y%t%m%t%d`, `2006-10-03 00:00:00+00:00`, ``, ``}, // %T (+ %Y %m %d) {`20061012 01:02:03`, `%Y%m%d %T`, `2006-10-12 01:02:03+00:00`, ``, ``}, // %U %u (+ %Y) {`2018 10 4`, `%Y %U %u`, `2018-03-15 00:00:00+00:00`, ``, ``}, // %W %w (+ %Y) {`2018 10 4`, `%Y %W %w`, `2018-03-08 00:00:00+00:00`, ``, ``}, // %x {`10/12/06`, `%x`, `2006-10-12 00:00:00+00:00`, ``, ``}, // %X {`20061012 01:02:03`, `%Y%m%d %X`, `2006-10-12 01:02:03+00:00`, ``, ``}, // %y (+ %m %d) {`000101`, `%y%m%d`, `2000-01-01 00:00:00+00:00`, ``, ``}, {`680101`, `%y%m%d`, `2068-01-01 00:00:00+00:00`, ``, ``}, {`690101`, `%y%m%d`, `1969-01-01 00:00:00+00:00`, ``, ``}, {`990101`, `%y%m%d`, `1999-01-01 00:00:00+00:00`, ``, ``}, // %Y {`19000101`, `%Y%m%d`, `1900-01-01 00:00:00+00:00`, ``, ``}, {`20000101`, `%Y%m%d`, `2000-01-01 00:00:00+00:00`, ``, ``}, {`30000101`, `%Y%m%d`, `3000-01-01 00:00:00+00:00`, ``, ``}, // %z causes the time zone to adjust the time when parsing, but the time zone information // is not retained when printing the timestamp out back. {`20160101 13:00 +0655`, `%Y%m%d %H:%M %z`, `2016-01-01 06:05:00+00:00`, `%Y%m%d %H:%M %z`, `20160101 06:05 +0000`}, } for _, test := range tests { ctx := tree.NewTestingEvalContext(cluster.MakeTestingClusterSettings()) defer ctx.Mon.Stop(context.Background()) exprStr := fmt.Sprintf("experimental_strptime('%s', '%s')", test.start, test.format) expr, err := parser.ParseExpr(exprStr) if err != nil { t.Errorf("%s: %v", exprStr, err) continue } typedExpr, err := expr.TypeCheck(nil, types.Timestamp) if err != nil { t.Errorf("%s: %v", exprStr, err) continue } r, err := typedExpr.Eval(ctx) if err != nil { t.Errorf("%s: %v", exprStr, err) continue } ts, ok := r.(*tree.DTimestampTZ) if !ok { t.Errorf("%s: result not a timestamp: %s", exprStr, r) continue } tmS := ts.String() tmS = tmS[1 : len(tmS)-1] // strip the quote delimiters if tmS != test.tm { t.Errorf("%s: got %q, expected %q", exprStr, tmS, test.tm) continue } revfmt := test.format if test.revformat != "" { revfmt = test.revformat } ref := test.start if test.reverse != "" { ref = test.reverse } exprStr = fmt.Sprintf("experimental_strftime('%s'::timestamp, '%s')", tmS, revfmt) expr, err = parser.ParseExpr(exprStr) if err != nil { t.Errorf("%s: %v", exprStr, err) continue } typedExpr, err = expr.TypeCheck(nil, types.Timestamp) if err != nil { t.Errorf("%s: %v", exprStr, err) continue } r, err = typedExpr.Eval(ctx) if err != nil { t.Errorf("%s: %v", exprStr, err) continue } rs, ok := r.(*tree.DString) if !ok { t.Errorf("%s: result not a string: %s", exprStr, r) continue } revS := string(*rs) if ref != revS { t.Errorf("%s: got %q, expected %q", exprStr, revS, ref) } } } func TestEvalError(t *testing.T)
{ defer leaktest.AfterTest(t)() testData := []struct { expr string expected string }{ {`1 % 0`, `zero modulus`}, {`1 / 0`, `division by zero`}, {`1::float / 0::float`, `division by zero`}, {`1 // 0`, `division by zero`}, {`1.5 / 0`, `division by zero`}, {`'11h2m'::interval / 0`, `division by zero`}, {`'11h2m'::interval / 0.0::float`, `division by zero`}, {`'???'::bool`, `could not parse "???" as type bool`}, {`'foo'::int`, `could not parse "foo" as type int: strconv.ParseInt: parsing "foo": invalid syntax`}, {`'3\r2'::int`, `could not parse "3\\r2" as type int: strconv.ParseInt: parsing "3\\r2": invalid syntax`}, {`'bar'::float`, `could not parse "bar" as type float: strconv.ParseFloat: parsing "bar": invalid syntax`}, {`'baz'::decimal`, `could not parse "baz" as type decimal`}, {`'2010-09-28 12:00:00.1q'::date`, `parsing as type date: could not parse "2010-09-28 12:00:00.1q"`}, {`'12:00:00q'::time`, `could not parse "12:00:00q" as type time`}, {`'2010-09-28 12:00.1 MST'::timestamp`, `unimplemented: timestamp abbreviations not supported`}, {`'abcd'::interval`, `could not parse "abcd" as type interval: interval: missing unit`}, {`'1- 2:3:4 9'::interval`, `could not parse "1- 2:3:4 9" as type interval: invalid input syntax for type interval 1- 2:3:4 9`}, {`e'\\xdedf0d36174'::BYTES`, `could not parse "\\xdedf0d36174" as type bytes: encoding/hex: odd length hex string`}, {`ARRAY[NULL, ARRAY[1, 2]]`, `multidimensional arrays must have array expressions with matching dimensions`}, {`ARRAY[ARRAY[1, 2], NULL]`, `multidimensional arrays must have array expressions with matching dimensions`}, {`ARRAY[ARRAY[1, 2], ARRAY[1]]`, `multidimensional arrays must have array expressions with matching dimensions`}, // TODO(pmattis): Check for overflow. // {`~0 + 1`, `0`}, {`9223372036854775807::int + 1::int`, `integer out of range`}, {`-9223372036854775807::int + -2::int`, `integer out of range`}, {`-9223372036854775807::int + -9223372036854775807::int`, `integer out of range`}, {`9223372036854775807::int + 9223372036854775807::int`, `integer out of range`}, {`9223372036854775807::int - -1::int`, `integer out of range`}, {`-9223372036854775807::int - 2::int`, `integer out of range`}, {`-9223372036854775807::int - 9223372036854775807::int`, `integer out of range`}, {`9223372036854775807::int - -9223372036854775807::int`, `integer out of range`}, {`4611686018427387904::int * 2::int`, `integer out of range`}, {`4611686018427387904::int * 2::int`, `integer out of range`}, {`(-9223372036854775807:::int - 1) * -1:::int`, `integer out of range`}, {`123 ^ 100`, `integer out of range`}, {`power(123, 100)`, `integer out of range`}, // Although these next two tests are valid integers, a float cannot represent // them exactly, and so rounds them to a larger number that is out of bounds // for an int. Thus, they should fail during this conversion. {`9223372036854775807::float::int`, `integer out of range`}, {`-9223372036854775808::float::int`, `integer out of range`}, // The two smallest floats that cannot be converted to an int. 
{`9223372036854775296::float::int`, `integer out of range`}, {`-9223372036854775296::float::int`, `integer out of range`}, {`1e500::decimal::int`, `integer out of range`}, {`1e500::decimal::float`, `float out of range`}, {`1e300::decimal::float::int`, `integer out of range`}, {`'Inf'::decimal::int`, `integer out of range`}, {`'NaN'::decimal::int`, `integer out of range`}, {`'Inf'::float::int`, `integer out of range`}, {`'NaN'::float::int`, `integer out of range`}, {`'1.1'::int`, `could not parse "1.1" as type int`}, {`IFERROR(1/0, 123, 'unknown')`, `division by zero`}, {`ISERROR(1/0, 'unknown')`, `division by zero`}, {`like_escape('___', '\___', 'abc')`, `invalid escape string`}, {`like_escape('abc', 'abc', 'a日')`, `invalid escape string`}, {`like_escape('abc', 'abc', '漢日')`, `invalid escape string`}, {`like_escape('__', '_', '_')`, `LIKE pattern must not end with escape character`}, {`like_escape('%%', '%', '%')`, `LIKE pattern must not end with escape character`}, {`like_escape('__', '___', '_')`, `LIKE pattern must not end with escape character`}, {`like_escape('%%', '%%%', '%')`, `LIKE pattern must not end with escape character`}, {`like_escape('abc', 'ab%', '%')`, `LIKE pattern must not end with escape character`}, {`like_escape('abc', '%b%', '%')`, `LIKE pattern must not end with escape character`}, {`like_escape('abc', 'ab_', '_')`, `LIKE pattern must not end with escape character`}, {`like_escape('abc', '%b_', '_')`, `LIKE pattern must not end with escape character`}, {`like_escape('abc', '%b漢', '漢')`, `LIKE pattern must not end with escape character`}, {`similar_to_escape('abc', '-a-b-c', '-')`, `error parsing regexp: invalid escape sequence`}, {`similar_to_escape('a(b)c', '%((_)_', '(')`, `error parsing regexp: unexpected )`}, {`convert_from('\xaaaa'::bytea, 'woo')`, `convert_from(): invalid source encoding name "woo"`}, {`convert_from('\xaaaa'::bytea, 'utf8')`, `convert_from(): invalid byte sequence for encoding "UTF8"`}, {`convert_to('abc', 'woo')`, `convert_to(): invalid destination encoding name "woo"`}, {`convert_to('漢', 'latin1')`, `convert_to(): character '漢' has no representation in encoding "LATIN1"`}, {`'123'::BIT`, `could not parse string as bit array: "2" is not a valid binary digit`}, {`B'1001' & B'101'`, `cannot AND bit strings of different sizes`}, {`B'1001' | B'101'`, `cannot OR bit strings of different sizes`}, {`B'1001' # B'101'`, `cannot XOR bit strings of different sizes`}, } for _, d := range testData { expr, err := parser.ParseExpr(d.expr) if err != nil { t.Fatalf("%s: %v", d.expr, err) } typedExpr, err := tree.TypeCheck(expr, nil, types.Any) if err == nil { evalCtx := tree.NewTestingEvalContext(cluster.MakeTestingClusterSettings()) defer evalCtx.Stop(context.Background()) _, err = typedExpr.Eval(evalCtx) } if !testutils.IsError(err, strings.Replace(regexp.QuoteMeta(d.expected), `\.\*`, `.*`, -1)) { t.Errorf("%s: expected %s, but found %v", d.expr, d.expected, err) } } }
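// A worked note on the float-to-int overflow cases above (explanatory only,
// not part of the test data): float64 carries a 53-bit mantissa, so near 2^63
// adjacent representable values are 1024 apart. The literal
// 9223372036854775296 (= 2^63 - 512) therefore rounds to 2^63, one past the
// int64 maximum of 2^63 - 1, which is why the cast is expected to fail with
// "integer out of range".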
core.rs
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. pub use self::MaybeTyped::*; use rustc_lint; use rustc_driver::driver; use rustc::session::{self, config}; use rustc::session::config::UnstableFeatures; use rustc::middle::{privacy, ty}; use rustc::lint; use rustc_trans::back::link; use rustc_resolve as resolve; use syntax::{ast, ast_map, codemap, diagnostic}; use std::cell::{RefCell, Cell}; use std::collections::{HashMap, HashSet}; use visit_ast::RustdocVisitor; use clean; use clean::Clean; pub use rustc::session::config::Input; pub use rustc::session::search_paths::SearchPaths; /// Are we generating documentation (`Typed`) or tests (`NotTyped`)? pub enum MaybeTyped<'tcx> { Typed(ty::ctxt<'tcx>), NotTyped(session::Session) } pub type ExternalPaths = RefCell<Option<HashMap<ast::DefId, (Vec<String>, clean::TypeKind)>>>; pub struct DocContext<'tcx> { pub krate: &'tcx ast::Crate, pub maybe_typed: MaybeTyped<'tcx>, pub input: Input, pub external_paths: ExternalPaths, pub external_traits: RefCell<Option<HashMap<ast::DefId, clean::Trait>>>, pub external_typarams: RefCell<Option<HashMap<ast::DefId, String>>>, pub inlined: RefCell<Option<HashSet<ast::DefId>>>, pub populated_crate_impls: RefCell<HashSet<ast::CrateNum>>, pub deref_trait_did: Cell<Option<ast::DefId>>, } impl<'tcx> DocContext<'tcx> { pub fn sess<'a>(&'a self) -> &'a session::Session { match self.maybe_typed { Typed(ref tcx) => &tcx.sess, NotTyped(ref sess) => sess } } pub fn tcx_opt<'a>(&'a self) -> Option<&'a ty::ctxt<'tcx>> { match self.maybe_typed { Typed(ref tcx) => Some(tcx), NotTyped(_) => None } } pub fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> { let tcx_opt = self.tcx_opt(); tcx_opt.expect("tcx not present") } } pub struct CrateAnalysis { pub exported_items: privacy::ExportedItems, pub public_items: privacy::PublicItems, pub external_paths: ExternalPaths, pub external_typarams: RefCell<Option<HashMap<ast::DefId, String>>>, pub inlined: RefCell<Option<HashSet<ast::DefId>>>, pub deref_trait_did: Option<ast::DefId>, } pub type Externs = HashMap<String, Vec<String>>; pub fn run_core(search_paths: SearchPaths, cfgs: Vec<String>, externs: Externs, input: Input, triple: Option<String>) -> (clean::Crate, CrateAnalysis) { // Parse, resolve, and typecheck the given crate. 
let cpath = match input { Input::File(ref p) => Some(p.clone()), _ => None }; let warning_lint = lint::builtin::WARNINGS.name_lower(); let sessopts = config::Options { maybe_sysroot: None, search_paths: search_paths, crate_types: vec!(config::CrateTypeRlib), lint_opts: vec!((warning_lint, lint::Allow)), externs: externs, target_triple: triple.unwrap_or(config::host_triple().to_string()), cfg: config::parse_cfgspecs(cfgs), // Ensure that rustdoc works even if rustc is feature-staged unstable_features: UnstableFeatures::Default, ..config::basic_options().clone() }; let codemap = codemap::CodeMap::new(); let diagnostic_handler = diagnostic::Handler::new(diagnostic::Auto, None, true); let span_diagnostic_handler = diagnostic::SpanHandler::new(diagnostic_handler, codemap); let sess = session::build_session_(sessopts, cpath, span_diagnostic_handler); rustc_lint::register_builtins(&mut sess.lint_store.borrow_mut(), Some(&sess)); let cfg = config::build_configuration(&sess); let krate = driver::phase_1_parse_input(&sess, cfg, &input); let name = link::find_crate_name(Some(&sess), &krate.attrs, &input); let krate = driver::phase_2_configure_and_expand(&sess, krate, &name, None) .expect("phase_2_configure_and_expand aborted in rustdoc!"); let mut forest = ast_map::Forest::new(krate); let arenas = ty::CtxtArenas::new(); let ast_map = driver::assign_node_ids_and_map(&sess, &mut forest); let ty::CrateAnalysis { exported_items, public_items, ty_cx, .. } = driver::phase_3_run_analysis_passes(sess, ast_map, &arenas, name, resolve::MakeGlobMap::No); let ctxt = DocContext { krate: ty_cx.map.krate(), maybe_typed: Typed(ty_cx), input: input, external_traits: RefCell::new(Some(HashMap::new())), external_typarams: RefCell::new(Some(HashMap::new())), external_paths: RefCell::new(Some(HashMap::new())), inlined: RefCell::new(Some(HashSet::new())), populated_crate_impls: RefCell::new(HashSet::new()), deref_trait_did: Cell::new(None), }; debug!("crate: {:?}", ctxt.krate); let mut analysis = CrateAnalysis { exported_items: exported_items, public_items: public_items, external_paths: RefCell::new(None), external_typarams: RefCell::new(None), inlined: RefCell::new(None), deref_trait_did: None, }; let krate = { let mut v = RustdocVisitor::new(&ctxt, Some(&analysis)); v.visit(ctxt.krate); v.clean(&ctxt) }; let external_paths = ctxt.external_paths.borrow_mut().take(); *analysis.external_paths.borrow_mut() = external_paths; let map = ctxt.external_typarams.borrow_mut().take(); *analysis.external_typarams.borrow_mut() = map; let map = ctxt.inlined.borrow_mut().take(); *analysis.inlined.borrow_mut() = map; analysis.deref_trait_did = ctxt.deref_trait_did.get(); (krate, analysis) }
msggetcfilters.go
// Copyright (c) 2017 The btcsuite developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package wire import ( "io" "github.com/tjaxer/mtcd/chaincfg/chainhash" ) // MaxGetCFiltersReqRange the maximum number of filters that may be requested in // a getcfheaders message. const MaxGetCFiltersReqRange = 1000 // MsgGetCFilters implements the Message interface and represents a bitcoin // getcfilters message. It is used to request committed filters for a range of // blocks. type MsgGetCFilters struct { FilterType FilterType StartHeight uint32 StopHash chainhash.Hash } // BtcDecode decodes r using the bitcoin protocol encoding into the receiver. // This is part of the Message interface implementation. func (msg *MsgGetCFilters) BtcDecode(r io.Reader, pver uint32, _ MessageEncoding) error { err := readElement(r, &msg.FilterType) if err != nil { return err } err = readElement(r, &msg.StartHeight) if err != nil { return err } return readElement(r, &msg.StopHash) } // BtcEncode encodes the receiver to w using the bitcoin protocol encoding. // This is part of the Message interface implementation. func (msg *MsgGetCFilters) BtcEncode(w io.Writer, pver uint32, _ MessageEncoding) error { err := writeElement(w, msg.FilterType) if err != nil { return err } err = writeElement(w, &msg.StartHeight) if err != nil { return err } return writeElement(w, &msg.StopHash) } // Command returns the protocol command string for the message. This is part // of the Message interface implementation. func (msg *MsgGetCFilters) Command() string { return CmdGetCFilters } // MaxPayloadLength returns the maximum length the payload can be for the // receiver. This is part of the Message interface implementation. func (msg *MsgGetCFilters) MaxPayloadLength(pver uint32) uint32 { // Filter type + uint32 + block hash return 1 + 4 + chainhash.HashSize } // NewMsgGetCFilters returns a new bitcoin getcfilters message that conforms to // the Message interface using the passed parameters and defaults for the // remaining fields. func
NewMsgGetCFilters(filterType FilterType, startHeight uint32, stopHash *chainhash.Hash) *MsgGetCFilters {
	return &MsgGetCFilters{
		FilterType:  filterType,
		StartHeight: startHeight,
		StopHash:    *stopHash,
	}
}
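// A minimal usage sketch (not part of the original file; the zero-valued stop
// hash is a placeholder, and GCSFilterRegular is assumed to be the basic
// filter type defined elsewhere in this package):
//
//	var stopHash chainhash.Hash // placeholder; normally a real block hash
//	msg := NewMsgGetCFilters(GCSFilterRegular, 1000, &stopHash)
//	// msg can then be written to a peer with the generic wire message writer.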
pat.rs
use crate::check::FnCtxt; use rustc_ast as ast; use rustc_ast::util::lev_distance::find_best_match_for_name; use rustc_data_structures::fx::FxHashMap; use rustc_errors::{pluralize, struct_span_err, Applicability, DiagnosticBuilder}; use rustc_hir as hir; use rustc_hir::def::{CtorKind, DefKind, Res}; use rustc_hir::pat_util::EnumerateAndAdjustIterator; use rustc_hir::{HirId, Pat, PatKind}; use rustc_infer::infer; use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind}; use rustc_middle::ty::subst::GenericArg; use rustc_middle::ty::{self, Adt, BindingMode, Ty, TypeFoldable}; use rustc_span::hygiene::DesugaringKind; use rustc_span::source_map::{Span, Spanned}; use rustc_span::symbol::Ident; use rustc_trait_selection::traits::{ObligationCause, Pattern}; use std::cmp; use std::collections::hash_map::Entry::{Occupied, Vacant}; use super::report_unexpected_variant_res; const CANNOT_IMPLICITLY_DEREF_POINTER_TRAIT_OBJ: &str = "\ This error indicates that a pointer to a trait type cannot be implicitly dereferenced by a \ pattern. Every trait defines a type, but because the size of trait implementors isn't fixed, \ this type has no compile-time size. Therefore, all accesses to trait types must be through \ pointers. If you encounter this error you should try to avoid dereferencing the pointer. You can read more about trait objects in the Trait Objects section of the Reference: \ https://doc.rust-lang.org/reference/types.html#trait-objects"; /// Information about the expected type at the top level of type checking a pattern. /// /// **NOTE:** This is only for use by diagnostics. Do NOT use for type checking logic! #[derive(Copy, Clone)] struct TopInfo<'tcx> { /// The `expected` type at the top level of type checking a pattern. expected: Ty<'tcx>, /// Was the origin of the `span` from a scrutinee expression? /// /// Otherwise there is no scrutinee and it could be e.g. from the type of a formal parameter. origin_expr: bool, /// The span giving rise to the `expected` type, if one could be provided. /// /// If `origin_expr` is `true`, then this is the span of the scrutinee as in: /// /// - `match scrutinee { ... }` /// - `let _ = scrutinee;` /// /// This is used to point to add context in type errors. /// In the following example, `span` corresponds to the `a + b` expression: /// /// ```text /// error[E0308]: mismatched types /// --> src/main.rs:L:C /// | /// L | let temp: usize = match a + b { /// | ----- this expression has type `usize` /// L | Ok(num) => num, /// | ^^^^^^^ expected `usize`, found enum `std::result::Result` /// | /// = note: expected type `usize` /// found type `std::result::Result<_, _>` /// ``` span: Option<Span>, /// This refers to the parent pattern. Used to provide extra diagnostic information on errors. /// ```text /// error[E0308]: mismatched types /// --> $DIR/const-in-struct-pat.rs:8:17 /// | /// L | struct f; /// | --------- unit struct defined here /// ... 
/// L | let Thing { f } = t; /// | ^ /// | | /// | expected struct `std::string::String`, found struct `f` /// | `f` is interpreted as a unit struct, not a new binding /// | help: bind the struct field to a different name instead: `f: other_f` /// ``` parent_pat: Option<&'tcx Pat<'tcx>>, } impl<'tcx> FnCtxt<'_, 'tcx> { fn pattern_cause(&self, ti: TopInfo<'tcx>, cause_span: Span) -> ObligationCause<'tcx> { let code = Pattern { span: ti.span, root_ty: ti.expected, origin_expr: ti.origin_expr }; self.cause(cause_span, code) } fn demand_eqtype_pat_diag( &self, cause_span: Span, expected: Ty<'tcx>, actual: Ty<'tcx>, ti: TopInfo<'tcx>, ) -> Option<DiagnosticBuilder<'tcx>> { self.demand_eqtype_with_origin(&self.pattern_cause(ti, cause_span), expected, actual) } fn demand_eqtype_pat( &self, cause_span: Span, expected: Ty<'tcx>, actual: Ty<'tcx>, ti: TopInfo<'tcx>, ) { if let Some(mut err) = self.demand_eqtype_pat_diag(cause_span, expected, actual, ti) { err.emit(); } } } const INITIAL_BM: BindingMode = BindingMode::BindByValue(hir::Mutability::Not); /// Mode for adjusting the expected type and binding mode. enum AdjustMode { /// Peel off all immediate reference types. Peel, /// Reset binding mode to the initial mode. Reset, /// Pass on the input binding mode and expected type. Pass, } impl<'a, 'tcx> FnCtxt<'a, 'tcx> { /// Type check the given top level pattern against the `expected` type. /// /// If a `Some(span)` is provided and `origin_expr` holds, /// then the `span` represents the scrutinee's span. /// The scrutinee is found in e.g. `match scrutinee { ... }` and `let pat = scrutinee;`. /// /// Otherwise, `Some(span)` represents the span of a type expression /// which originated the `expected` type. pub fn check_pat_top( &self, pat: &'tcx Pat<'tcx>, expected: Ty<'tcx>, span: Option<Span>, origin_expr: bool, ) { let info = TopInfo { expected, origin_expr, span, parent_pat: None }; self.check_pat(pat, expected, INITIAL_BM, info); } /// Type check the given `pat` against the `expected` type /// with the provided `def_bm` (default binding mode). /// /// Outside of this module, `check_pat_top` should always be used. /// Conversely, inside this module, `check_pat_top` should never be used. 
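    ///
    /// For instance (a sketch of the flow, not a precise trace of the
    /// internals): for `match &Some(5) { Some(x) => (), None => () }` the arm
    /// patterns enter through `check_pat_top` with `expected = &Option<i32>`;
    /// the reference is peeled, the default binding mode becomes `ref`, and
    /// `check_pat` recurses into the `Some(x)` subpattern, so `x` ends up
    /// with type `&i32`.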
fn check_pat( &self, pat: &'tcx Pat<'tcx>, expected: Ty<'tcx>, def_bm: BindingMode, ti: TopInfo<'tcx>, ) { debug!("check_pat(pat={:?},expected={:?},def_bm={:?})", pat, expected, def_bm); let path_res = match &pat.kind { PatKind::Path(qpath) => Some(self.resolve_ty_and_res_ufcs(qpath, pat.hir_id, pat.span)), _ => None, }; let adjust_mode = self.calc_adjust_mode(pat, path_res.map(|(res, ..)| res)); let (expected, def_bm) = self.calc_default_binding_mode(pat, expected, def_bm, adjust_mode); let ty = match pat.kind { PatKind::Wild => expected, PatKind::Lit(lt) => self.check_pat_lit(pat.span, lt, expected, ti), PatKind::Range(lhs, rhs, _) => self.check_pat_range(pat.span, lhs, rhs, expected, ti), PatKind::Binding(ba, var_id, _, sub) => { self.check_pat_ident(pat, ba, var_id, sub, expected, def_bm, ti) } PatKind::TupleStruct(ref qpath, subpats, ddpos) => { self.check_pat_tuple_struct(pat, qpath, subpats, ddpos, expected, def_bm, ti) } PatKind::Path(_) => self.check_pat_path(pat, path_res.unwrap(), expected, ti), PatKind::Struct(ref qpath, fields, etc) => { self.check_pat_struct(pat, qpath, fields, etc, expected, def_bm, ti) } PatKind::Or(pats) => { let parent_pat = Some(pat); for pat in pats { self.check_pat(pat, expected, def_bm, TopInfo { parent_pat, ..ti }); } expected } PatKind::Tuple(elements, ddpos) => { self.check_pat_tuple(pat.span, elements, ddpos, expected, def_bm, ti) } PatKind::Box(inner) => self.check_pat_box(pat.span, inner, expected, def_bm, ti), PatKind::Ref(inner, mutbl) => { self.check_pat_ref(pat, inner, mutbl, expected, def_bm, ti) } PatKind::Slice(before, slice, after) => { self.check_pat_slice(pat.span, before, slice, after, expected, def_bm, ti) } }; self.write_ty(pat.hir_id, ty); // (note_1): In most of the cases where (note_1) is referenced // (literals and constants being the exception), we relate types // using strict equality, even though subtyping would be sufficient. // There are a few reasons for this, some of which are fairly subtle // and which cost me (nmatsakis) an hour or two debugging to remember, // so I thought I'd write them down this time. // // 1. There is no loss of expressiveness here, though it does // cause some inconvenience. What we are saying is that the type // of `x` becomes *exactly* what is expected. This can cause unnecessary // errors in some cases, such as this one: // // ``` // fn foo<'x>(x: &'x i32) { // let a = 1; // let mut z = x; // z = &a; // } // ``` // // The reason we might get an error is that `z` might be // assigned a type like `&'x i32`, and then we would have // a problem when we try to assign `&a` to `z`, because // the lifetime of `&a` (i.e., the enclosing block) is // shorter than `'x`. // // HOWEVER, this code works fine. The reason is that the // expected type here is whatever type the user wrote, not // the initializer's type. In this case the user wrote // nothing, so we are going to create a type variable `Z`. // Then we will assign the type of the initializer (`&'x i32`) // as a subtype of `Z`: `&'x i32 <: Z`. And hence we // will instantiate `Z` as a type `&'0 i32` where `'0` is // a fresh region variable, with the constraint that `'x : '0`. // So basically we're all set. // // Note that there are two tests to check that this remains true // (`regions-reassign-{match,let}-bound-pointer.rs`). // // 2. Things go horribly wrong if we use subtype. The reason for // THIS is a fairly subtle case involving bound regions. 
See the // `givens` field in `region_constraints`, as well as the test // `regions-relate-bound-regions-on-closures-to-inference-variables.rs`, // for details. Short version is that we must sometimes detect // relationships between specific region variables and regions // bound in a closure signature, and that detection gets thrown // off when we substitute fresh region variables here to enable // subtyping. } /// Compute the new expected type and default binding mode from the old ones /// as well as the pattern form we are currently checking. fn calc_default_binding_mode( &self, pat: &'tcx Pat<'tcx>, expected: Ty<'tcx>, def_bm: BindingMode, adjust_mode: AdjustMode, ) -> (Ty<'tcx>, BindingMode) { match adjust_mode { AdjustMode::Pass => (expected, def_bm), AdjustMode::Reset => (expected, INITIAL_BM), AdjustMode::Peel => self.peel_off_references(pat, expected, def_bm), } } /// How should the binding mode and expected type be adjusted? /// /// When the pattern is a path pattern, `opt_path_res` must be `Some(res)`. fn calc_adjust_mode(&self, pat: &'tcx Pat<'tcx>, opt_path_res: Option<Res>) -> AdjustMode { match &pat.kind { // Type checking these product-like types successfully always require // that the expected type be of those types and not reference types. PatKind::Struct(..) | PatKind::TupleStruct(..) | PatKind::Tuple(..) | PatKind::Box(_) | PatKind::Range(..) | PatKind::Slice(..) => AdjustMode::Peel, // String and byte-string literals result in types `&str` and `&[u8]` respectively. // All other literals result in non-reference types. // As a result, we allow `if let 0 = &&0 {}` but not `if let "foo" = &&"foo {}`. PatKind::Lit(lt) => match self.check_expr(lt).kind() { ty::Ref(..) => AdjustMode::Pass, _ => AdjustMode::Peel, }, PatKind::Path(_) => match opt_path_res.unwrap() { // These constants can be of a reference type, e.g. `const X: &u8 = &0;`. // Peeling the reference types too early will cause type checking failures. // Although it would be possible to *also* peel the types of the constants too. Res::Def(DefKind::Const | DefKind::AssocConst, _) => AdjustMode::Pass, // In the `ValueNS`, we have `SelfCtor(..) | Ctor(_, Const), _)` remaining which // could successfully compile. The former being `Self` requires a unit struct. // In either case, and unlike constants, the pattern itself cannot be // a reference type wherefore peeling doesn't give up any expressivity. _ => AdjustMode::Peel, }, // When encountering a `& mut? pat` pattern, reset to "by value". // This is so that `x` and `y` here are by value, as they appear to be: // // ``` // match &(&22, &44) { // (&x, &y) => ... // } // ``` // // See issue #46688. PatKind::Ref(..) => AdjustMode::Reset, // A `_` pattern works with any expected type, so there's no need to do anything. PatKind::Wild // Bindings also work with whatever the expected type is, // and moreover if we peel references off, that will give us the wrong binding type. // Also, we can have a subpattern `binding @ pat`. // Each side of the `@` should be treated independently (like with OR-patterns). | PatKind::Binding(..) // An OR-pattern just propagates to each individual alternative. // This is maximally flexible, allowing e.g., `Some(mut x) | &Some(mut x)`. // In that example, `Some(mut x)` results in `Peel` whereas `&Some(mut x)` in `Reset`. | PatKind::Or(_) => AdjustMode::Pass, } } /// Peel off as many immediately nested `& mut?` from the expected type as possible /// and return the new expected type and binding default binding mode. 
/// The adjustments vector, if non-empty is stored in a table. fn peel_off_references( &self, pat: &'tcx Pat<'tcx>, expected: Ty<'tcx>, mut def_bm: BindingMode, ) -> (Ty<'tcx>, BindingMode) { let mut expected = self.resolve_vars_with_obligations(&expected); // Peel off as many `&` or `&mut` from the scrutinee type as possible. For example, // for `match &&&mut Some(5)` the loop runs three times, aborting when it reaches // the `Some(5)` which is not of type Ref. // // For each ampersand peeled off, update the binding mode and push the original // type into the adjustments vector. // // See the examples in `ui/match-defbm*.rs`. let mut pat_adjustments = vec![]; while let ty::Ref(_, inner_ty, inner_mutability) = *expected.kind() { debug!("inspecting {:?}", expected); debug!("current discriminant is Ref, inserting implicit deref"); // Preserve the reference type. We'll need it later during THIR lowering. pat_adjustments.push(expected); expected = inner_ty; def_bm = ty::BindByReference(match def_bm { // If default binding mode is by value, make it `ref` or `ref mut` // (depending on whether we observe `&` or `&mut`). ty::BindByValue(_) | // When `ref mut`, stay a `ref mut` (on `&mut`) or downgrade to `ref` (on `&`). ty::BindByReference(hir::Mutability::Mut) => inner_mutability, // Once a `ref`, always a `ref`. // This is because a `& &mut` cannot mutate the underlying value. ty::BindByReference(m @ hir::Mutability::Not) => m, }); } if !pat_adjustments.is_empty() { debug!("default binding mode is now {:?}", def_bm); self.inh .typeck_results .borrow_mut() .pat_adjustments_mut() .insert(pat.hir_id, pat_adjustments); } (expected, def_bm) } fn check_pat_lit( &self, span: Span, lt: &hir::Expr<'tcx>, expected: Ty<'tcx>, ti: TopInfo<'tcx>, ) -> Ty<'tcx> { // We've already computed the type above (when checking for a non-ref pat), // so avoid computing it again. let ty = self.node_ty(lt.hir_id); // Byte string patterns behave the same way as array patterns // They can denote both statically and dynamically-sized byte arrays. let mut pat_ty = ty; if let hir::ExprKind::Lit(Spanned { node: ast::LitKind::ByteStr(_), .. }) = lt.kind { let expected = self.structurally_resolved_type(span, expected); if let ty::Ref(_, inner_ty, _) = expected.kind() { if matches!(inner_ty.kind(), ty::Slice(_)) { let tcx = self.tcx; pat_ty = tcx.mk_imm_ref(tcx.lifetimes.re_static, tcx.mk_slice(tcx.types.u8)); } } } // Somewhat surprising: in this case, the subtyping relation goes the // opposite way as the other cases. Actually what we really want is not // a subtyping relation at all but rather that there exists a LUB // (so that they can be compared). However, in practice, constants are // always scalars or strings. For scalars subtyping is irrelevant, // and for strings `ty` is type is `&'static str`, so if we say that // // &'static str <: expected // // then that's equivalent to there existing a LUB. let cause = self.pattern_cause(ti, span); if let Some(mut err) = self.demand_suptype_with_origin(&cause, expected, pat_ty) { err.emit_unless( ti.span .filter(|&s| { // In the case of `if`- and `while`-expressions we've already checked // that `scrutinee: bool`. We know that the pattern is `true`, // so an error here would be a duplicate and from the wrong POV. 
s.is_desugaring(DesugaringKind::CondTemporary) }) .is_some(), ); } pat_ty } fn check_pat_range( &self, span: Span, lhs: Option<&'tcx hir::Expr<'tcx>>, rhs: Option<&'tcx hir::Expr<'tcx>>, expected: Ty<'tcx>, ti: TopInfo<'tcx>, ) -> Ty<'tcx> { let calc_side = |opt_expr: Option<&'tcx hir::Expr<'tcx>>| match opt_expr { None => (None, None), Some(expr) => { let ty = self.check_expr(expr); // Check that the end-point is of numeric or char type. let fail = !(ty.is_numeric() || ty.is_char() || ty.references_error()); (Some(ty), Some((fail, ty, expr.span))) } }; let (lhs_ty, lhs) = calc_side(lhs); let (rhs_ty, rhs) = calc_side(rhs); if let (Some((true, ..)), _) | (_, Some((true, ..))) = (lhs, rhs) { // There exists a side that didn't meet our criteria that the end-point // be of a numeric or char type, as checked in `calc_side` above. self.emit_err_pat_range(span, lhs, rhs); return self.tcx.ty_error(); } // Now that we know the types can be unified we find the unified type // and use it to type the entire expression. let common_type = self.resolve_vars_if_possible(&lhs_ty.or(rhs_ty).unwrap_or(expected)); // Subtyping doesn't matter here, as the value is some kind of scalar. let demand_eqtype = |x, y| { if let Some((_, x_ty, x_span)) = x { if let Some(mut err) = self.demand_eqtype_pat_diag(x_span, expected, x_ty, ti) { if let Some((_, y_ty, y_span)) = y { self.endpoint_has_type(&mut err, y_span, y_ty); } err.emit(); }; } }; demand_eqtype(lhs, rhs); demand_eqtype(rhs, lhs); common_type } fn endpoint_has_type(&self, err: &mut DiagnosticBuilder<'_>, span: Span, ty: Ty<'_>) { if !ty.references_error() { err.span_label(span, &format!("this is of type `{}`", ty)); } } fn emit_err_pat_range( &self, span: Span, lhs: Option<(bool, Ty<'tcx>, Span)>, rhs: Option<(bool, Ty<'tcx>, Span)>, ) { let span = match (lhs, rhs) { (Some((true, ..)), Some((true, ..))) => span, (Some((true, _, sp)), _) => sp, (_, Some((true, _, sp))) => sp, _ => span_bug!(span, "emit_err_pat_range: no side failed or exists but still error?"), }; let mut err = struct_span_err!( self.tcx.sess, span, E0029, "only `char` and numeric types are allowed in range patterns" ); let msg = |ty| format!("this is of type `{}` but it should be `char` or numeric", ty); let mut one_side_err = |first_span, first_ty, second: Option<(bool, Ty<'tcx>, Span)>| { err.span_label(first_span, &msg(first_ty)); if let Some((_, ty, sp)) = second { self.endpoint_has_type(&mut err, sp, ty); } }; match (lhs, rhs) { (Some((true, lhs_ty, lhs_sp)), Some((true, rhs_ty, rhs_sp))) => { err.span_label(lhs_sp, &msg(lhs_ty)); err.span_label(rhs_sp, &msg(rhs_ty)); } (Some((true, lhs_ty, lhs_sp)), rhs) => one_side_err(lhs_sp, lhs_ty, rhs), (lhs, Some((true, rhs_ty, rhs_sp))) => one_side_err(rhs_sp, rhs_ty, lhs), _ => span_bug!(span, "Impossible, verified above."), } if self.tcx.sess.teach(&err.get_code().unwrap()) { err.note( "In a match expression, only numbers and characters can be matched \ against a range. This is because the compiler checks that the range \ is non-empty at compile-time, and is unable to evaluate arbitrary \ comparison functions. If you want to capture values of an orderable \ type between two end-points, you can use a guard.", ); } err.emit(); } fn check_pat_ident( &self, pat: &'tcx Pat<'tcx>, ba: hir::BindingAnnotation, var_id: HirId, sub: Option<&'tcx Pat<'tcx>>, expected: Ty<'tcx>, def_bm: BindingMode, ti: TopInfo<'tcx>, ) -> Ty<'tcx> { // Determine the binding mode... 
let bm = match ba { hir::BindingAnnotation::Unannotated => def_bm, _ => BindingMode::convert(ba), }; // ...and store it in a side table: self.inh.typeck_results.borrow_mut().pat_binding_modes_mut().insert(pat.hir_id, bm); debug!("check_pat_ident: pat.hir_id={:?} bm={:?}", pat.hir_id, bm); let local_ty = self.local_ty(pat.span, pat.hir_id).decl_ty; let eq_ty = match bm { ty::BindByReference(mutbl) => { // If the binding is like `ref x | ref mut x`, // then `x` is assigned a value of type `&M T` where M is the // mutability and T is the expected type. // // `x` is assigned a value of type `&M T`, hence `&M T <: typeof(x)` // is required. However, we use equality, which is stronger. // See (note_1) for an explanation. self.new_ref_ty(pat.span, mutbl, expected) } // Otherwise, the type of x is the expected type `T`. ty::BindByValue(_) => { // As above, `T <: typeof(x)` is required, but we use equality, see (note_1). expected } }; self.demand_eqtype_pat(pat.span, eq_ty, local_ty, ti); // If there are multiple arms, make sure they all agree on // what the type of the binding `x` ought to be. if var_id != pat.hir_id { self.check_binding_alt_eq_ty(pat.span, var_id, local_ty, ti); } if let Some(p) = sub { self.check_pat(&p, expected, def_bm, TopInfo { parent_pat: Some(&pat), ..ti }); } local_ty } fn check_binding_alt_eq_ty(&self, span: Span, var_id: HirId, ty: Ty<'tcx>, ti: TopInfo<'tcx>) { let var_ty = self.local_ty(span, var_id).decl_ty; if let Some(mut err) = self.demand_eqtype_pat_diag(span, var_ty, ty, ti) { let hir = self.tcx.hir(); let var_ty = self.resolve_vars_with_obligations(var_ty); let msg = format!("first introduced with type `{}` here", var_ty); err.span_label(hir.span(var_id), msg); let in_match = hir.parent_iter(var_id).any(|(_, n)| { matches!( n, hir::Node::Expr(hir::Expr { kind: hir::ExprKind::Match(.., hir::MatchSource::Normal), .. }) ) }); let pre = if in_match { "in the same arm, " } else { "" }; err.note(&format!("{}a binding must have the same type in all alternatives", pre)); err.emit(); } } fn borrow_pat_suggestion( &self, err: &mut DiagnosticBuilder<'_>, pat: &Pat<'_>, inner: &Pat<'_>, expected: Ty<'tcx>, ) { let tcx = self.tcx; if let PatKind::Binding(..) = inner.kind { let binding_parent_id = tcx.hir().get_parent_node(pat.hir_id); let binding_parent = tcx.hir().get(binding_parent_id); debug!("inner {:?} pat {:?} parent {:?}", inner, pat, binding_parent); match binding_parent { hir::Node::Param(hir::Param { span, .. }) => { if let Ok(snippet) = tcx.sess.source_map().span_to_snippet(inner.span) { err.span_suggestion( *span, &format!("did you mean `{}`", snippet), format!(" &{}", expected), Applicability::MachineApplicable, ); } } hir::Node::Arm(_) | hir::Node::Pat(_) => { // rely on match ergonomics or it might be nested `&&pat` if let Ok(snippet) = tcx.sess.source_map().span_to_snippet(inner.span) { err.span_suggestion( pat.span, "you can probably remove the explicit borrow", snippet, Applicability::MaybeIncorrect, ); } } _ => {} // don't provide suggestions in other cases #55175 } } } pub fn check_dereferenceable(&self, span: Span, expected: Ty<'tcx>, inner: &Pat<'_>) -> bool { if let PatKind::Binding(..) = inner.kind { if let Some(mt) = self.shallow_resolve(expected).builtin_deref(true) { if let ty::Dynamic(..) = mt.ty.kind() { // This is "x = SomeTrait" being reduced from // "let &x = &SomeTrait" or "let box x = Box<SomeTrait>", an error. 
let type_str = self.ty_to_string(expected); let mut err = struct_span_err!( self.tcx.sess, span, E0033, "type `{}` cannot be dereferenced", type_str ); err.span_label(span, format!("type `{}` cannot be dereferenced", type_str)); if self.tcx.sess.teach(&err.get_code().unwrap()) { err.note(CANNOT_IMPLICITLY_DEREF_POINTER_TRAIT_OBJ); } err.emit(); return false; } } } true } fn check_pat_struct( &self, pat: &'tcx Pat<'tcx>, qpath: &hir::QPath<'_>, fields: &'tcx [hir::FieldPat<'tcx>], etc: bool, expected: Ty<'tcx>, def_bm: BindingMode, ti: TopInfo<'tcx>, ) -> Ty<'tcx> { // Resolve the path and check the definition for errors. let (variant, pat_ty) = if let Some(variant_ty) = self.check_struct_path(qpath, pat.hir_id) { variant_ty } else { let err = self.tcx.ty_error(); for field in fields { let ti = TopInfo { parent_pat: Some(&pat), ..ti }; self.check_pat(&field.pat, err, def_bm, ti); } return err; }; // Type-check the path. self.demand_eqtype_pat(pat.span, expected, pat_ty, ti); // Type-check subpatterns. if self.check_struct_pat_fields(pat_ty, &pat, variant, fields, etc, def_bm, ti) { pat_ty } else { self.tcx.ty_error() } } fn check_pat_path( &self, pat: &Pat<'_>, path_resolution: (Res, Option<Ty<'tcx>>, &'b [hir::PathSegment<'b>]), expected: Ty<'tcx>, ti: TopInfo<'tcx>, ) -> Ty<'tcx> { let tcx = self.tcx; // We have already resolved the path. let (res, opt_ty, segments) = path_resolution; match res { Res::Err => { self.set_tainted_by_errors(); return tcx.ty_error(); } Res::Def(DefKind::AssocFn | DefKind::Ctor(_, CtorKind::Fictive | CtorKind::Fn), _) => { report_unexpected_variant_res(tcx, res, pat.span); return tcx.ty_error(); } Res::SelfCtor(..) | Res::Def( DefKind::Ctor(_, CtorKind::Const) | DefKind::Const | DefKind::AssocConst | DefKind::ConstParam, _, ) => {} // OK _ => bug!("unexpected pattern resolution: {:?}", res), } // Type-check the path. let (pat_ty, pat_res) = self.instantiate_value_path(segments, opt_ty, res, pat.span, pat.hir_id); if let Some(err) = self.demand_suptype_with_origin(&self.pattern_cause(ti, pat.span), expected, pat_ty) { self.emit_bad_pat_path(err, pat.span, res, pat_res, pat_ty, segments, ti.parent_pat); } pat_ty } fn maybe_suggest_range_literal( &self, e: &mut DiagnosticBuilder<'_>, opt_def_id: Option<hir::def_id::DefId>, ident: Ident, ) -> bool { match opt_def_id { Some(def_id) => match self.tcx.hir().get_if_local(def_id) { Some(hir::Node::Item(hir::Item { kind: hir::ItemKind::Const(_, body_id), .. })) => match self.tcx.hir().get(body_id.hir_id) { hir::Node::Expr(expr) => { if hir::is_range_literal(expr) { let span = self.tcx.hir().span(body_id.hir_id); if let Ok(snip) = self.tcx.sess.source_map().span_to_snippet(span) { e.span_suggestion_verbose( ident.span, "you may want to move the range into the match block", snip, Applicability::MachineApplicable, ); return true; } } } _ => (), }, _ => (), }, _ => (), } false } fn emit_bad_pat_path( &self, mut e: DiagnosticBuilder<'_>, pat_span: Span, res: Res, pat_res: Res, pat_ty: Ty<'tcx>, segments: &'b [hir::PathSegment<'b>], parent_pat: Option<&Pat<'_>>, ) { if let Some(span) = self.tcx.hir().res_span(pat_res) { e.span_label(span, &format!("{} defined here", res.descr())); if let [hir::PathSegment { ident, .. }] = &*segments { e.span_label( pat_span, &format!( "`{}` is interpreted as {} {}, not a new binding", ident, res.article(), res.descr(), ), ); match parent_pat { Some(Pat { kind: hir::PatKind::Struct(..), .. 
}) => { e.span_suggestion_verbose( ident.span.shrink_to_hi(), "bind the struct field to a different name instead", format!(": other_{}", ident.as_str().to_lowercase()), Applicability::HasPlaceholders, ); } _ => { let (type_def_id, item_def_id) = match pat_ty.kind() { Adt(def, _) => match res { Res::Def(DefKind::Const, def_id) => (Some(def.did), Some(def_id)), _ => (None, None), }, _ => (None, None), }; let ranges = &[ self.tcx.lang_items().range_struct(), self.tcx.lang_items().range_from_struct(), self.tcx.lang_items().range_to_struct(), self.tcx.lang_items().range_full_struct(), self.tcx.lang_items().range_inclusive_struct(), self.tcx.lang_items().range_to_inclusive_struct(), ]; if type_def_id != None && ranges.contains(&type_def_id) { if !self.maybe_suggest_range_literal(&mut e, item_def_id, *ident) { let msg = "constants only support matching by type, \ if you meant to match against a range of values, \ consider using a range pattern like `min ..= max` in the match block"; e.note(msg); } } else { let msg = "introduce a new binding instead"; let sugg = format!("other_{}", ident.as_str().to_lowercase()); e.span_suggestion( ident.span, msg, sugg, Applicability::HasPlaceholders, ); } } }; } } e.emit(); } fn check_pat_tuple_struct( &self, pat: &'tcx Pat<'tcx>, qpath: &hir::QPath<'_>, subpats: &'tcx [&'tcx Pat<'tcx>], ddpos: Option<usize>, expected: Ty<'tcx>, def_bm: BindingMode, ti: TopInfo<'tcx>, ) -> Ty<'tcx> { let tcx = self.tcx; let on_error = || { let parent_pat = Some(pat); for pat in subpats { self.check_pat(&pat, tcx.ty_error(), def_bm, TopInfo { parent_pat, ..ti }); } }; let report_unexpected_res = |res: Res| { let sm = tcx.sess.source_map(); let path_str = sm .span_to_snippet(sm.span_until_char(pat.span, '(')) .map_or(String::new(), |s| format!(" `{}`", s.trim_end())); let msg = format!( "expected tuple struct or tuple variant, found {}{}", res.descr(), path_str ); let mut err = struct_span_err!(tcx.sess, pat.span, E0164, "{}", msg); match res { Res::Def(DefKind::Fn | DefKind::AssocFn, _) => { err.span_label(pat.span, "`fn` calls are not allowed in patterns"); err.help( "for more information, visit \ https://doc.rust-lang.org/book/ch18-00-patterns.html", ); } _ => { err.span_label(pat.span, "not a tuple variant or struct"); } } err.emit(); on_error(); }; // Resolve the path and check the definition for errors. let (res, opt_ty, segments) = self.resolve_ty_and_res_ufcs(qpath, pat.hir_id, pat.span); if res == Res::Err { self.set_tainted_by_errors(); on_error(); return self.tcx.ty_error(); } // Type-check the path. let (pat_ty, res) = self.instantiate_value_path(segments, opt_ty, res, pat.span, pat.hir_id); if !pat_ty.is_fn() { report_unexpected_res(res); return tcx.ty_error(); } let variant = match res { Res::Err => { self.set_tainted_by_errors(); on_error(); return tcx.ty_error(); } Res::Def(DefKind::AssocConst | DefKind::AssocFn, _) => { report_unexpected_res(res); return tcx.ty_error(); } Res::Def(DefKind::Ctor(_, CtorKind::Fn), _) => tcx.expect_variant_res(res), _ => bug!("unexpected pattern resolution: {:?}", res), }; // Replace constructor type with constructed type for tuple struct patterns. let pat_ty = pat_ty.fn_sig(tcx).output(); let pat_ty = pat_ty.no_bound_vars().expect("expected fn type"); // Type-check the tuple struct pattern against the expected type. let diag = self.demand_eqtype_pat_diag(pat.span, expected, pat_ty, ti); let had_err = if let Some(mut err) = diag { err.emit(); true } else { false }; // Type-check subpatterns. 
if subpats.len() == variant.fields.len() || subpats.len() < variant.fields.len() && ddpos.is_some() { let substs = match pat_ty.kind() { ty::Adt(_, substs) => substs, _ => bug!("unexpected pattern type {:?}", pat_ty), }; for (i, subpat) in subpats.iter().enumerate_and_adjust(variant.fields.len(), ddpos) { let field_ty = self.field_ty(subpat.span, &variant.fields[i], substs); self.check_pat(&subpat, field_ty, def_bm, TopInfo { parent_pat: Some(&pat), ..ti }); self.tcx.check_stability(variant.fields[i].did, Some(pat.hir_id), subpat.span); } } else { // Pattern has wrong number of fields. self.e0023(pat.span, res, qpath, subpats, &variant.fields, expected, had_err); on_error(); return tcx.ty_error(); } pat_ty } fn e0023( &self, pat_span: Span, res: Res, qpath: &hir::QPath<'_>, subpats: &'tcx [&'tcx Pat<'tcx>], fields: &'tcx [ty::FieldDef], expected: Ty<'tcx>, had_err: bool, ) { let subpats_ending = pluralize!(subpats.len()); let fields_ending = pluralize!(fields.len()); let res_span = self.tcx.def_span(res.def_id()); let mut err = struct_span_err!( self.tcx.sess, pat_span, E0023, "this pattern has {} field{}, but the corresponding {} has {} field{}", subpats.len(), subpats_ending, res.descr(), fields.len(), fields_ending, ); err.span_label( pat_span, format!("expected {} field{}, found {}", fields.len(), fields_ending, subpats.len(),), ) .span_label(res_span, format!("{} defined here", res.descr())); // Identify the case `Some(x, y)` where the expected type is e.g. `Option<(T, U)>`. // More generally, the expected type wants a tuple variant with one field of an // N-arity-tuple, e.g., `V_i((p_0, .., p_N))`. Meanwhile, the user supplied a pattern // with the subpatterns directly in the tuple variant pattern, e.g., `V_i(p_0, .., p_N)`. let missing_parenthesis = match (&expected.kind(), fields, had_err) { // #67037: only do this if we could successfully type-check the expected type against // the tuple struct pattern. Otherwise the substs could get out of range on e.g., // `let P() = U;` where `P != U` with `struct P<T>(T);`. (ty::Adt(_, substs), [field], false) => { let field_ty = self.field_ty(pat_span, field, substs); match field_ty.kind() { ty::Tuple(_) => field_ty.tuple_fields().count() == subpats.len(), _ => false, } } _ => false, }; if missing_parenthesis { let (left, right) = match subpats { // This is the zero case; we aim to get the "hi" part of the `QPath`'s // span as the "lo" and then the "hi" part of the pattern's span as the "hi". // This looks like: // // help: missing parenthesis // | // L | let A(()) = A(()); // | ^ ^ [] => (qpath.span().shrink_to_hi(), pat_span), // Easy case. Just take the "lo" of the first sub-pattern and the "hi" of the // last sub-pattern. In the case of `A(x)` the first and last may coincide. // This looks like: // // help: missing parenthesis // | // L | let A((x, y)) = A((1, 2)); // | ^ ^ [first, ..] => (first.span.shrink_to_lo(), subpats.last().unwrap().span), }; err.multipart_suggestion( "missing parenthesis", vec![(left, "(".to_string()), (right.shrink_to_hi(), ")".to_string())], Applicability::MachineApplicable, ); } err.emit(); } fn check_pat_tuple( &self, span: Span, elements: &'tcx [&'tcx Pat<'tcx>], ddpos: Option<usize>, expected: Ty<'tcx>, def_bm: BindingMode, ti: TopInfo<'tcx>, ) -> Ty<'tcx> { let tcx = self.tcx; let mut expected_len = elements.len(); if ddpos.is_some() { // Require known type only when `..` is present. 
if let ty::Tuple(ref tys) = self.structurally_resolved_type(span, expected).kind() { expected_len = tys.len(); } } let max_len = cmp::max(expected_len, elements.len()); let element_tys_iter = (0..max_len).map(|_| { GenericArg::from(self.next_ty_var( // FIXME: `MiscVariable` for now -- obtaining the span and name information // from all tuple elements isn't trivial. TypeVariableOrigin { kind: TypeVariableOriginKind::TypeInference, span }, )) }); let element_tys = tcx.mk_substs(element_tys_iter); let pat_ty = tcx.mk_ty(ty::Tuple(element_tys)); if let Some(mut err) = self.demand_eqtype_pat_diag(span, expected, pat_ty, ti) { err.emit(); // Walk subpatterns with an expected type of `err` in this case to silence // further errors being emitted when using the bindings. #50333 let element_tys_iter = (0..max_len).map(|_| tcx.ty_error()); for (_, elem) in elements.iter().enumerate_and_adjust(max_len, ddpos) { self.check_pat(elem, &tcx.ty_error(), def_bm, ti); } tcx.mk_tup(element_tys_iter) } else { for (i, elem) in elements.iter().enumerate_and_adjust(max_len, ddpos) { self.check_pat(elem, &element_tys[i].expect_ty(), def_bm, ti); } pat_ty } } fn check_struct_pat_fields( &self, adt_ty: Ty<'tcx>, pat: &'tcx Pat<'tcx>, variant: &'tcx ty::VariantDef, fields: &'tcx [hir::FieldPat<'tcx>], etc: bool, def_bm: BindingMode, ti: TopInfo<'tcx>, ) -> bool { let tcx = self.tcx; let (substs, adt) = match adt_ty.kind() { ty::Adt(adt, substs) => (substs, adt), _ => span_bug!(pat.span, "struct pattern is not an ADT"), }; // Index the struct fields' types. let field_map = variant .fields .iter() .enumerate() .map(|(i, field)| (field.ident.normalize_to_macros_2_0(), (i, field))) .collect::<FxHashMap<_, _>>(); // Keep track of which fields have already appeared in the pattern. let mut used_fields = FxHashMap::default(); let mut no_field_errors = true; let mut inexistent_fields = vec![]; // Typecheck each field. for field in fields { let span = field.span; let ident = tcx.adjust_ident(field.ident, variant.def_id); let field_ty = match used_fields.entry(ident) { Occupied(occupied) => { self.error_field_already_bound(span, field.ident, *occupied.get()); no_field_errors = false; tcx.ty_error() } Vacant(vacant) => { vacant.insert(span); field_map .get(&ident) .map(|(i, f)| { self.write_field_index(field.hir_id, *i); self.tcx.check_stability(f.did, Some(pat.hir_id), span); self.field_ty(span, f, substs) }) .unwrap_or_else(|| { inexistent_fields.push(field.ident); no_field_errors = false; tcx.ty_error() }) } }; self.check_pat(&field.pat, field_ty, def_bm, TopInfo { parent_pat: Some(&pat), ..ti }); } let mut unmentioned_fields = variant .fields .iter() .map(|field| (field, field.ident.normalize_to_macros_2_0())) .filter(|(_, ident)| !used_fields.contains_key(&ident)) .collect::<Vec<_>>(); let inexistent_fields_err = if !(inexistent_fields.is_empty() || variant.is_recovered()) { Some(self.error_inexistent_fields( adt.variant_descr(), &inexistent_fields, &mut unmentioned_fields, variant, )) } else { None }; // Require `..` if struct has non_exhaustive attribute. if variant.is_field_list_non_exhaustive() && !adt.did.is_local() && !etc { self.error_foreign_non_exhaustive_spat(pat, adt.variant_descr(), fields.is_empty()); } let mut unmentioned_err = None; // Report an error if incorrect number of the fields were specified. 
if adt.is_union() { if fields.len() != 1 { tcx.sess .struct_span_err(pat.span, "union patterns should have exactly one field") .emit(); } if etc { tcx.sess.struct_span_err(pat.span, "`..` cannot be used in union patterns").emit(); } } else if !etc && !unmentioned_fields.is_empty() { let no_accessible_unmentioned_fields = unmentioned_fields .iter() .find(|(field, _)| { field.vis.is_accessible_from(tcx.parent_module(pat.hir_id).to_def_id(), tcx) }) .is_none(); if no_accessible_unmentioned_fields { unmentioned_err = Some(self.error_no_accessible_fields(pat, &fields)); } else { unmentioned_err = Some(self.error_unmentioned_fields(pat, &unmentioned_fields, &fields)); } } match (inexistent_fields_err, unmentioned_err) { (Some(mut i), Some(mut u)) => { if let Some(mut e) = self.error_tuple_variant_as_struct_pat(pat, fields, variant) { // We don't want to show the inexistent fields error when this was // `Foo { a, b }` when it should have been `Foo(a, b)`. i.delay_as_bug(); u.delay_as_bug(); e.emit(); } else { i.emit(); u.emit(); } } (None, Some(mut err)) | (Some(mut err), None) => { err.emit(); } (None, None) => {} } no_field_errors } fn error_foreign_non_exhaustive_spat(&self, pat: &Pat<'_>, descr: &str, no_fields: bool) { let sess = self.tcx.sess; let sm = sess.source_map(); let sp_brace = sm.end_point(pat.span); let sp_comma = sm.end_point(pat.span.with_hi(sp_brace.hi())); let sugg = if no_fields || sp_brace != sp_comma { ".. }" } else { ", .. }" }; let mut err = struct_span_err!( sess, pat.span, E0638, "`..` required with {} marked as non-exhaustive", descr ); err.span_suggestion_verbose( sp_comma, "add `..` at the end of the field list to ignore all other fields", sugg.to_string(), Applicability::MachineApplicable, ); err.emit(); } fn error_field_already_bound(&self, span: Span, ident: Ident, other_field: Span) { struct_span_err!( self.tcx.sess, span, E0025, "field `{}` bound multiple times in the pattern", ident ) .span_label(span, format!("multiple uses of `{}` in pattern", ident)) .span_label(other_field, format!("first use of `{}`", ident)) .emit(); } fn error_inexistent_fields( &self, kind_name: &str, inexistent_fields: &[Ident], unmentioned_fields: &mut Vec<(&ty::FieldDef, Ident)>, variant: &ty::VariantDef, ) -> DiagnosticBuilder<'tcx> { let tcx = self.tcx; let (field_names, t, plural) = if inexistent_fields.len() == 1 { (format!("a field named `{}`", inexistent_fields[0]), "this", "") } else { ( format!( "fields named {}", inexistent_fields .iter() .map(|ident| format!("`{}`", ident)) .collect::<Vec<String>>() .join(", ") ), "these", "s", ) }; let spans = inexistent_fields.iter().map(|ident| ident.span).collect::<Vec<_>>(); let mut err = struct_span_err!( tcx.sess, spans, E0026, "{} `{}` does not have {}", kind_name, tcx.def_path_str(variant.def_id), field_names ); if let Some(ident) = inexistent_fields.last() { err.span_label( ident.span, format!( "{} `{}` does not have {} field{}", kind_name, tcx.def_path_str(variant.def_id), t, plural ), ); if plural == "" { let input = unmentioned_fields.iter().map(|(_, field)| &field.name); let suggested_name = find_best_match_for_name(input, ident.name, None); if let Some(suggested_name) = suggested_name { err.span_suggestion( ident.span, "a field with a similar name exists", suggested_name.to_string(), Applicability::MaybeIncorrect, ); // When we have a tuple struct used with struct we don't want to suggest using // the (valid) struct syntax with numeric field names. Instead we want to // suggest the expected syntax. 
We infer that this is the case by parsing the // `Ident` into an unsized integer. The suggestion will be emitted elsewhere in // `smart_resolve_context_dependent_help`. if suggested_name.to_ident_string().parse::<usize>().is_err() { // We don't want to throw `E0027` in case we have thrown `E0026` for them. unmentioned_fields.retain(|&(_, x)| x.name != suggested_name); } } } } if tcx.sess.teach(&err.get_code().unwrap()) { err.note( "This error indicates that a struct pattern attempted to \ extract a non-existent field from a struct. Struct fields \ are identified by the name used before the colon : so struct \ patterns should resemble the declaration of the struct type \ being matched.\n\n\ If you are using shorthand field patterns but want to refer \ to the struct field by a different name, you should rename \ it explicitly.", ); } err } fn error_tuple_variant_as_struct_pat( &self, pat: &Pat<'_>, fields: &'tcx [hir::FieldPat<'tcx>], variant: &ty::VariantDef, ) -> Option<DiagnosticBuilder<'tcx>> { if let (CtorKind::Fn, PatKind::Struct(qpath, ..)) = (variant.ctor_kind, &pat.kind) { let path = rustc_hir_pretty::to_string(rustc_hir_pretty::NO_ANN, |s| { s.print_qpath(qpath, false) }); let mut err = struct_span_err!( self.tcx.sess, pat.span, E0769, "tuple variant `{}` written as struct variant", path ); let (sugg, appl) = if fields.len() == variant.fields.len() { ( fields .iter() .map(|f| match self.tcx.sess.source_map().span_to_snippet(f.pat.span) { Ok(f) => f, Err(_) => rustc_hir_pretty::to_string(rustc_hir_pretty::NO_ANN, |s| { s.print_pat(f.pat) }), }) .collect::<Vec<String>>() .join(", "), Applicability::MachineApplicable, ) } else { ( variant.fields.iter().map(|_| "_").collect::<Vec<&str>>().join(", "), Applicability::MaybeIncorrect, ) }; err.span_suggestion( pat.span, "use the tuple variant pattern syntax instead", format!("{}({})", path, sugg), appl, ); return Some(err); } None } /// Returns a diagnostic reporting a struct pattern which is missing an `..` due to /// inaccessible fields. /// /// ```text /// error: pattern requires `..` due to inaccessible fields /// --> src/main.rs:10:9 /// | /// LL | let foo::Foo {} = foo::Foo::default(); /// | ^^^^^^^^^^^ /// | /// help: add a `..` /// | /// LL | let foo::Foo { .. } = foo::Foo::default(); /// | ^^^^^^ /// ``` fn error_no_accessible_fields( &self, pat: &Pat<'_>, fields: &'tcx [hir::FieldPat<'tcx>], ) -> DiagnosticBuilder<'tcx> { let mut err = self .tcx .sess .struct_span_err(pat.span, "pattern requires `..` due to inaccessible fields"); if let Some(field) = fields.last() { err.span_suggestion_verbose( field.span.shrink_to_hi(), "ignore the inaccessible and unused fields", ", ..".to_string(), Applicability::MachineApplicable, ); } else { let qpath_span = if let PatKind::Struct(qpath, ..) = &pat.kind { qpath.span() } else { bug!("`error_no_accessible_fields` called on non-struct pattern"); }; // Shrink the span to exclude the `foo:Foo` in `foo::Foo { }`. let span = pat.span.with_lo(qpath_span.shrink_to_hi().hi()); err.span_suggestion_verbose( span, "ignore the inaccessible and unused fields", " { .. }".to_string(), Applicability::MachineApplicable, ); } err } /// Returns a diagnostic reporting a struct pattern which does not mention some fields. 
/// /// ```text /// error[E0027]: pattern does not mention field `you_cant_use_this_field` /// --> src/main.rs:15:9 /// | /// LL | let foo::Foo {} = foo::Foo::new(); /// | ^^^^^^^^^^^ missing field `you_cant_use_this_field` /// ``` fn error_unmentioned_fields( &self, pat: &Pat<'_>, unmentioned_fields: &[(&ty::FieldDef, Ident)], fields: &'tcx [hir::FieldPat<'tcx>], ) -> DiagnosticBuilder<'tcx> { let field_names = if unmentioned_fields.len() == 1 { format!("field `{}`", unmentioned_fields[0].1) } else { let fields = unmentioned_fields .iter() .map(|(_, name)| format!("`{}`", name)) .collect::<Vec<String>>() .join(", "); format!("fields {}", fields) }; let mut err = struct_span_err!( self.tcx.sess, pat.span, E0027, "pattern does not mention {}", field_names ); err.span_label(pat.span, format!("missing {}", field_names)); let len = unmentioned_fields.len(); let (prefix, postfix, sp) = match fields { [] => match &pat.kind { PatKind::Struct(path, [], false) => { (" { ", " }", path.span().shrink_to_hi().until(pat.span.shrink_to_hi())) } _ => return err, }, [.., field] => ( match pat.kind { PatKind::Struct(_, [_, ..], _) => ", ", _ => "", }, "", field.span.shrink_to_hi(), ), }; err.span_suggestion( sp, &format!( "include the missing field{} in the pattern", if len == 1 { "" } else { "s" }, ), format!( "{}{}{}", prefix, unmentioned_fields .iter() .map(|(_, name)| name.to_string()) .collect::<Vec<_>>() .join(", "), postfix, ), Applicability::MachineApplicable, ); err.span_suggestion( sp, &format!( "if you don't care about {} missing field{}, you can explicitly ignore {}", if len == 1 { "this" } else { "these" }, if len == 1 { "" } else { "s" }, if len == 1 { "it" } else { "them" }, ), format!("{}..{}", prefix, postfix), Applicability::MachineApplicable, ); err } fn check_pat_box( &self, span: Span, inner: &'tcx Pat<'tcx>, expected: Ty<'tcx>, def_bm: BindingMode, ti: TopInfo<'tcx>, ) -> Ty<'tcx> { let tcx = self.tcx; let (box_ty, inner_ty) = if self.check_dereferenceable(span, expected, &inner) { // Here, `demand::subtype` is good enough, but I don't // think any errors can be introduced by using `demand::eqtype`. let inner_ty = self.next_ty_var(TypeVariableOrigin { kind: TypeVariableOriginKind::TypeInference, span: inner.span, }); let box_ty = tcx.mk_box(inner_ty); self.demand_eqtype_pat(span, expected, box_ty, ti); (box_ty, inner_ty) } else { let err = tcx.ty_error(); (err, err) }; self.check_pat(&inner, inner_ty, def_bm, ti); box_ty } fn check_pat_ref( &self, pat: &'tcx Pat<'tcx>, inner: &'tcx Pat<'tcx>, mutbl: hir::Mutability, expected: Ty<'tcx>, def_bm: BindingMode, ti: TopInfo<'tcx>, ) -> Ty<'tcx> { let tcx = self.tcx; let expected = self.shallow_resolve(expected); let (rptr_ty, inner_ty) = if self.check_dereferenceable(pat.span, expected, &inner) { // `demand::subtype` would be good enough, but using `eqtype` turns // out to be equally general. See (note_1) for details. // Take region, inner-type from expected type if we can, // to avoid creating needless variables. This also helps with // the bad interactions of the given hack detailed in (note_1). 
debug!("check_pat_ref: expected={:?}", expected); match *expected.kind() { ty::Ref(_, r_ty, r_mutbl) if r_mutbl == mutbl => (expected, r_ty), _ => { let inner_ty = self.next_ty_var(TypeVariableOrigin { kind: TypeVariableOriginKind::TypeInference, span: inner.span, }); let rptr_ty = self.new_ref_ty(pat.span, mutbl, inner_ty); debug!("check_pat_ref: demanding {:?} = {:?}", expected, rptr_ty); let err = self.demand_eqtype_pat_diag(pat.span, expected, rptr_ty, ti); // Look for a case like `fn foo(&foo: u32)` and suggest // `fn foo(foo: &u32)` if let Some(mut err) = err { self.borrow_pat_suggestion(&mut err, &pat, &inner, &expected); err.emit(); } (rptr_ty, inner_ty) } } } else { let err = tcx.ty_error(); (err, err) }; self.check_pat(&inner, inner_ty, def_bm, TopInfo { parent_pat: Some(&pat), ..ti }); rptr_ty } /// Create a reference type with a fresh region variable. fn new_ref_ty(&self, span: Span, mutbl: hir::Mutability, ty: Ty<'tcx>) -> Ty<'tcx> { let region = self.next_region_var(infer::PatternRegion(span)); let mt = ty::TypeAndMut { ty, mutbl }; self.tcx.mk_ref(region, mt) } /// Type check a slice pattern. /// /// Syntactically, these look like `[pat_0, ..., pat_n]`. /// Semantically, we are type checking a pattern with structure: /// ``` /// [before_0, ..., before_n, (slice, after_0, ... after_n)?] /// ``` /// The type of `slice`, if it is present, depends on the `expected` type. /// If `slice` is missing, then so is `after_i`. /// If `slice` is present, it can still represent 0 elements. fn check_pat_slice( &self, span: Span, before: &'tcx [&'tcx Pat<'tcx>], slice: Option<&'tcx Pat<'tcx>>, after: &'tcx [&'tcx Pat<'tcx>], expected: Ty<'tcx>, def_bm: BindingMode, ti: TopInfo<'tcx>, ) -> Ty<'tcx> { let expected = self.structurally_resolved_type(span, expected); let (element_ty, opt_slice_ty, inferred) = match *expected.kind() { // An array, so we might have something like `let [a, b, c] = [0, 1, 2];`. ty::Array(element_ty, len) => { let min = before.len() as u64 + after.len() as u64; let (opt_slice_ty, expected) = self.check_array_pat_len(span, element_ty, expected, slice, len, min); // `opt_slice_ty.is_none()` => `slice.is_none()`. // Note, though, that opt_slice_ty could be `Some(error_ty)`. assert!(opt_slice_ty.is_some() || slice.is_none()); (element_ty, opt_slice_ty, expected) } ty::Slice(element_ty) => (element_ty, Some(expected), expected), // The expected type must be an array or slice, but was neither, so error. _ => { if !expected.references_error() { self.error_expected_array_or_slice(span, expected); } let err = self.tcx.ty_error(); (err, Some(err), err) } }; // Type check all the patterns before `slice`. for elt in before { self.check_pat(&elt, element_ty, def_bm, ti); } // Type check the `slice`, if present, against its expected type. if let Some(slice) = slice { self.check_pat(&slice, opt_slice_ty.unwrap(), def_bm, ti); } // Type check the elements after `slice`, if present. for elt in after { self.check_pat(&elt, element_ty, def_bm, ti); } inferred } /// Type check the length of an array pattern. /// /// Returns both the type of the variable length pattern (or `None`), and the potentially /// inferred array type. We only return `None` for the slice type if `slice.is_none()`. fn check_array_pat_len( &self, span: Span, element_ty: Ty<'tcx>, arr_ty: Ty<'tcx>, slice: Option<&'tcx Pat<'tcx>>, len: &ty::Const<'tcx>, min_len: u64, ) -> (Option<Ty<'tcx>>, Ty<'tcx>) { if let Some(len) = len.try_eval_usize(self.tcx, self.param_env) { // Now we know the length... 
if slice.is_none() { // ...and since there is no variable-length pattern, // we require an exact match between the number of elements // in the array pattern and as provided by the matched type. if min_len == len { return (None, arr_ty); } self.error_scrutinee_inconsistent_length(span, min_len, len); } else if let Some(pat_len) = len.checked_sub(min_len) { // The variable-length pattern was there, // so it has an array type with the remaining elements left as its size... return (Some(self.tcx.mk_array(element_ty, pat_len)), arr_ty); } else { // ...however, in this case, there were no remaining elements. // That is, the slice pattern requires more than the array type offers. self.error_scrutinee_with_rest_inconsistent_length(span, min_len, len); } } else if slice.is_none() { // We have a pattern with a fixed length, // which we can use to infer the length of the array. let updated_arr_ty = self.tcx.mk_array(element_ty, min_len); self.demand_eqtype(span, updated_arr_ty, arr_ty); return (None, updated_arr_ty); } else { // We have a variable-length pattern and don't know the array length. // This happens if we have e.g., // `let [a, b, ..] = arr` where `arr: [T; N]` where `const N: usize`. self.error_scrutinee_unfixed_length(span); } // If we get here, we must have emitted an error. (Some(self.tcx.ty_error()), arr_ty) } fn error_scrutinee_inconsistent_length(&self, span: Span, min_len: u64, size: u64) { struct_span_err!( self.tcx.sess, span, E0527, "pattern requires {} element{} but array has {}", min_len, pluralize!(min_len), size, ) .span_label(span, format!("expected {} element{}", size, pluralize!(size))) .emit(); } fn error_scrutinee_with_rest_inconsistent_length(&self, span: Span, min_len: u64, size: u64) { struct_span_err!( self.tcx.sess, span, E0528, "pattern requires at least {} element{} but array has {}", min_len, pluralize!(min_len), size, ) .span_label( span, format!("pattern cannot match array of {} element{}", size, pluralize!(size),), ) .emit(); } fn error_scrutinee_unfixed_length(&self, span: Span) { struct_span_err!( self.tcx.sess, span, E0730, "cannot pattern-match on an array without a fixed length", ) .emit(); } fn error_expected_array_or_slice(&self, span: Span, expected_ty: Ty<'tcx>) { let mut err = struct_span_err!( self.tcx.sess, span, E0529, "expected an array or slice, found `{}`", expected_ty
err.help("the semantics of slice patterns changed recently; see issue #62254"); } } err.span_label(span, format!("pattern cannot match with input type `{}`", expected_ty)); err.emit(); } }
); if let ty::Ref(_, ty, _) = expected_ty.kind() { if let ty::Array(..) | ty::Slice(..) = ty.kind() {
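The array- and slice-pattern length checking above is easiest to see from the user's side. Below is a minimal, stand-alone sketch (not part of the compiler sources; the bindings and values are illustrative) of the pattern shapes whose element counts `check_array_pat_len` reasons about: a fixed-length pattern must match the array length exactly (otherwise the E0527/E0528 diagnostics fire), while a `..` rest pattern only requires `min_len <= len` and absorbs the remaining `len - min_len` elements.

```rust
fn main() {
    let arr = [1, 2, 3, 4, 5];

    // Fixed-length pattern: the element count must equal the array length.
    let [a, _b, _c, _d, e] = arr;
    assert_eq!((a, e), (1, 5));

    // Rest pattern: `..` stands in for the unmatched middle, so only
    // `min_len <= len` is required (here 2 <= 5).
    let [first, .., last] = arr;
    assert_eq!((first, last), (1, 5));

    // Binding the rest: `rest` has the array type of the leftover elements,
    // i.e. [i32; 4] for a [i32; 5] scrutinee with min_len = 1.
    let [head, rest @ ..] = arr;
    assert_eq!(head, 1);
    assert_eq!(rest.len(), 4);
}
```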
CalculateTotalRenderable.ts
import { IWorldRenderData } from './IWorldRenderData';
{ renderData.numRendered++; renderData.numRenderable++; if (entry.node.dirtyFrame >= renderData.gameFrame) { renderData.dirtyFrame++; } entry.children.forEach(child => { if (child.children.length > 0) { CalculateTotalRenderable(child, renderData); } }); }
import { SearchEntry } from '../display/SearchEntryType'; export function CalculateTotalRenderable (entry: SearchEntry, renderData: IWorldRenderData): void
rpc.py
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation. # Copyright (C) 2013 YAMAMOTO Takashi <yamamoto at valinux co jp> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # Specification: # - msgpack # https://github.com/msgpack/msgpack/blob/master/spec.md # - msgpack-rpc # https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md from collections import deque import select import msgpack import six class MessageType(object): REQUEST = 0 RESPONSE = 1 NOTIFY = 2 class MessageEncoder(object): """msgpack-rpc encoder/decoder. intended to be transport-agnostic. """ def __init__(self): super(MessageEncoder, self).__init__() if msgpack.version >= (1, 0, 0): self._packer = msgpack.Packer() # The strict_map_key=False option is required to use int keys in # maps; it is disabled by default to prevent hash collision denial # of service attacks (hashdos) in scenarios where an attacker can # control the keys to be hashed. self._unpacker = msgpack.Unpacker(strict_map_key=False) else: self._packer = msgpack.Packer(encoding='utf-8', use_bin_type=True) self._unpacker = msgpack.Unpacker(encoding='utf-8') self._next_msgid = 0 def _create_msgid(self): this_id = self._next_msgid self._next_msgid = (self._next_msgid + 1) % 0xffffffff return this_id def create_request(self, method, params): assert isinstance(method, (str, six.binary_type)) assert isinstance(params, list) msgid = self._create_msgid() return (self._packer.pack( [MessageType.REQUEST, msgid, method, params]), msgid) def create_response(self, msgid, error=None, result=None): assert isinstance(msgid, int) assert 0 <= msgid <= 0xffffffff assert error is None or result is None return self._packer.pack([MessageType.RESPONSE, msgid, error, result]) def create_notification(self, method, params): assert isinstance(method, (str, six.binary_type)) assert isinstance(params, list) return self._packer.pack([MessageType.NOTIFY, method, params]) def get_and_dispatch_messages(self, data, disp_table): """dissect messages from a raw stream data. disp_table[type] should be a callable for the corresponding MessageType. """ self._unpacker.feed(data) for m in self._unpacker: self._dispatch_message(m, disp_table) @staticmethod def _dispatch_message(m, disp_table): # XXX validation t = m[0] try: f = disp_table[t] except KeyError: # ignore messages with unknown type return f(m[1:]) class EndPoint(object): """An endpoint *sock* is a socket-like. it can be either blocking or non-blocking. 
""" def __init__(self, sock, encoder=None, disp_table=None): if encoder is None: encoder = MessageEncoder() self._encoder = encoder self._sock = sock if disp_table is None: self._table = { MessageType.REQUEST: self._enqueue_incoming_request, MessageType.RESPONSE: self._enqueue_incoming_response, MessageType.NOTIFY: self._enqueue_incoming_notification } else: self._table = disp_table self._send_buffer = bytearray() # msgids for which we sent a request but have not received a response self._pending_requests = set() # queues for incoming messages self._requests = deque() self._notifications = deque() self._responses = {} self._incoming = 0 # number of incoming messages in our queues self._closed_by_peer = False def selectable(self): rlist = [self._sock] wlist = [] if self._send_buffer: wlist.append(self._sock) return rlist, wlist def process_outgoing(self): try: sent_bytes = self._sock.send(self._send_buffer) except IOError: sent_bytes = 0 del self._send_buffer[:sent_bytes] def process_incoming(self): self.receive_messages(all=True) def process(self): self.process_outgoing() self.process_incoming() def block(self): rlist, wlist = self.selectable() select.select(rlist, wlist, rlist + wlist) def serve(self): while not self._closed_by_peer: self.block() self.process() def _send_message(self, msg): self._send_buffer += msg self.process_outgoing() def send_request(self, method, params): """Send a request """ msg, msgid = self._encoder.create_request(method, params) self._send_message(msg) self._pending_requests.add(msgid) return msgid def send_response(self, msgid, error=None, result=None): """Send a response """ msg = self._encoder.create_response(msgid, error, result) self._send_message(msg) def send_notification(self, method, params): """Send a notification """ msg = self._encoder.create_notification(method, params) self._send_message(msg) def receive_messages(self, all=False): """Try to receive some messages. Received messages are put on the internal queues. They can be retrieved using get_xxx() methods. Returns True if there's something queued for get_xxx() methods. """ while all or self._incoming == 0: try: packet = self._sock.recv(4096) # XXX the size is arbitrary except IOError: packet = None if not packet: if packet is not None: # socket closed by peer self._closed_by_peer = True break self._encoder.get_and_dispatch_messages(packet, self._table) return self._incoming > 0 def _enqueue_incoming_request(self, m): self._requests.append(m) self._incoming += 1 def _enqueue_incoming_response(self, m): msgid, error, result = m try: self._pending_requests.remove(msgid) except KeyError: # bogus msgid # XXXwarn return assert msgid not in self._responses self._responses[msgid] = (error, result) self._incoming += 1 def _enqueue_incoming_notification(self, m): self._notifications.append(m) self._incoming += 1 def _get_message(self, q): try: m = q.popleft() assert self._incoming > 0 self._incoming -= 1 return m except IndexError: return None def get_request(self): return self._get_message(self._requests) def get_response(self, msgid): try: m = self._responses.pop(msgid) assert self._incoming > 0 self._incoming -= 1 except KeyError: return None error, result = m return result, error def get_notification(self): return self._get_message(self._notifications) class
(Exception): """an error from server """ def __init__(self, error): super(RPCError, self).__init__() self._error = error def get_value(self): return self._error def __str__(self): return str(self._error) class Client(object): """a convenient class for a pure rpc client *sock* is a socket-like. it should be blocking. """ def __init__(self, sock, encoder=None, notification_callback=None): self._endpoint = EndPoint(sock, encoder) if notification_callback is None: # ignore notifications by default self._notification_callback = lambda n: None else: self._notification_callback = notification_callback def _process_input_notification(self): n = self._endpoint.get_notification() if n: self._notification_callback(n) def _process_input_request(self): # ignore requests as we are a pure client # XXXwarn self._endpoint.get_request() def call(self, method, params): """synchronous call. send a request and wait for a response. return a result. or raise RPCError exception if the peer sends us an error. """ msgid = self._endpoint.send_request(method, params) while True: if not self._endpoint.receive_messages(): raise EOFError("EOF") res = self._endpoint.get_response(msgid) if res: result, error = res if error is None: return result raise RPCError(error) self._process_input_notification() self._process_input_request() def send_notification(self, method, params): """send a notification to the peer. """ self._endpoint.send_notification(method, params) def receive_notification(self): """wait for the next incoming message. intended to be used when we have nothing to send but want to receive notifications. """ if not self._endpoint.receive_messages(): raise EOFError("EOF") self._process_input_notification() self._process_input_request() def peek_notification(self): while True: rlist, _wlist = self._endpoint.selectable() rlist, _wlist, _xlist = select.select(rlist, [], [], 0) if not rlist: break self.receive_notification()
RPCError
rpc_calls.js
import { updateAddress, updateBalance, updateConnection, updateSmartLocker, updatePendingTxs, addPendingTx, updateTxHistory, selectAuthorizedKey, selectPendingKey } from 'lib/store/actions' // TODO: rpc should not dispatch actions // move that logic to components // replicate what's done in extension export const rpc = { getAddress () { return function (dispatch) { window.web3.eth.getAccounts().then(([a]) => { if (a) dispatch(updateAddress(a)); }); } }, getBalance (address) { return function (dispatch) { if (address) { window.web3.eth .getBalance(address) .then(balance => dispatch(updateBalance(balance))) .catch(() => dispatch(updateBalance(0))) } else dispatch(updateBalance(0)); } }, getSmartLockerState (dispatchIfError=true) { return function (dispatch) { if (window.popLocker) { window.popLocker .getSmartLockerState() .then((state) => { if (state.status != 'error' || dispatchIfError) dispatch(updateSmartLocker(state)); }); } else dispatch(updateSmartLocker({ status: 'invalid' })); } }, setSmartLockerAddress (addr) { return function (dispatch) { dispatch(updatePendingTxs([], addr)); dispatch(updateTxHistory(null, addr)); dispatch(selectAuthorizedKey(null, addr)); dispatch(selectPendingKey(null, addr)); return window.popLocker.setSmartLockerAddress(addr); } }, removeKeyRequest (addr) { return function (dispatch) { return window.popLocker.removeKeyRequest(addr); } }, send (to, amount, sendAll=false) { return function (dispatch, getState) { const { address, connection, locker } = getState(); if (connection) { const tx = { to, from: address, value: window.web3.utils.toHex(window.web3.utils.toWei(amount || '0', 'ether')), sendAll, timeStamp: Date.now()/1000|0, chainId: config.constants.CHAIN_ID }
if (locker.status != 'smart' && locker.status != 'pending') dispatch(addPendingTx(tx)) }) } } }, isListening () { return function (dispatch) { window.web3.eth.net.isListening() .then(r => dispatch(updateConnection(r? 1 : 0))) } }, fetchTxHistory() { return function (dispatch, getState) { const { address, txHistory, pendingTxs } = getState(); if (address) { if (!txHistory.transactions) { dispatchUpdateTxHistory(dispatch, address, pendingTxs.transactions); } else { fetchEtherscanTxHistory(address, 1, 1) .then(([latestTx]) => { const latestTxHash = latestTx? latestTx.hash : null; const firstHistoryTxHash = txHistory.transactions[0]? txHistory.transactions[0].hash : null; if (latestTxHash != firstHistoryTxHash) dispatchUpdateTxHistory(dispatch, address, pendingTxs.transactions); }) } } } } } function dispatchUpdateTxHistory(dispatch, address, pendingTxs) { fetchEtherscanTxHistory(address, 1, 500) .then(txHistory => { dispatch(updateTxHistory(txHistory, address)); dispatch(updatePendingTxs(resolvePendingTxs(txHistory, pendingTxs), address)); }) } function fetchEtherscanTxHistory(address, page, offset) { const url = `${config.constants.ETHERSCAN_URL}&address=${address}&page=${page}&offset=${offset}`; const internalUrl = `${config.constants.ETHERSCAN_URL}internal&address=${address}&page=${page}&offset=${offset}`; return Promise.all([fetch(url), fetch(internalUrl)]) .then(([response, internalResponse]) => Promise.all([response.json(), internalResponse.json()])) .then(([response, internalResponse]) => [response.result.filter(a => {return a.isError == '0' || a.to != address.toLowerCase() || a.value != '0'}), internalResponse.result]) .then(([response, internalResponse]) => internalResponse.concat(response) .sort((a, b) => {return b.timeStamp - a.timeStamp}) .filter((a, b, c) => {return !b || a.hash != c[b-1].hash})); } function resolvePendingTxs(txHistory, pendingTxs) { let resolvedPendingTxs = []; for (const pendingTxIndex in pendingTxs) { let found = false; for (const txHistoryIndex in txHistory) { if (txHistory[txHistoryIndex].hash == pendingTxs[pendingTxIndex].hash) { found = true; break; } if (5000 + txHistory[txHistoryIndex].timeStamp < pendingTxs[pendingTxIndex].timeStamp) break; } if (!found) resolvedPendingTxs = [...resolvedPendingTxs, pendingTxs[pendingTxIndex]]; } return resolvedPendingTxs; }
window.web3.eth.sendTransaction(tx) .on('transactionHash', (txHash) => { tx.hash = txHash;
_createProductionServer.ts
import { createProdMockServer } from 'vite-plugin-mock/es/createProdMockServer'; import userMock from './sys/user'; import menuMock from './sys/menu'; import tableDemoMock from './demo/table-demo'; /** * Used in a production environment. Need to manually import all modules */ export function
() { createProdMockServer([...userMock, ...menuMock, ...tableDemoMock]); }
setupProdMockServer
vec2.ts
import { EPSILON, ICloneable, IEquatable } from "@chemistry/common"; export interface IVec2 { x: number; y: number; } export class Vec2 implements IVec2, ICloneable<Vec2>, IEquatable<Vec2> { public static add(v1: Vec2, v2: Vec2): Vec2 { return new Vec2(v1.x + v2.x, v1.y + v2.y); } public static sub(v1: Vec2, v2: Vec2): Vec2 { return new Vec2(v1.x - v2.x, v1.y - v2.y); } public static dot(v1: Vec2, v2: Vec2): number { return (v1.x * v2.x + v1.y * v2.y); } public static equals(v1: Vec2, v2: Vec2): boolean { return (Math.abs(v1.x - v2.x) < EPSILON) && (Math.abs(v1.y - v2.y) < EPSILON); } public x: number; public y: number; constructor(x: number, y: number) { this.x = x; this.y = y; } public get length(): number { return Math.sqrt(this.x * this.x + this.y * this.y); } public get lengthPow2(): number { return (this.x * this.x + this.y * this.y); } // average of the two components public get avg(): number { return (this.x + this.y) / 2;
} public scale(num: number): Vec2 { return new Vec2(this.x * num, this.y * num); } public normalize() { const len = this.length; if (len === 0) { throw new Error("Can not normalize zero vector"); } else { return new Vec2(this.x / len, this.y / len); } } public dot(vec: Vec2): number { return Vec2.dot(this, vec); } public sub(vec: Vec2): Vec2 { return Vec2.sub(this, vec); } public add(vector: Vec2): Vec2 { return Vec2.add(this, vector); } public equals(vec: Vec2) { return Vec2.equals(this, vec); } public clone() { return new Vec2(this.x, this.y); } public toString() { return "(" + this.x.toFixed(3) + "," + this.y.toFixed(3) + ")"; } }
product.module.ts
import { NgModule } from '@angular/core'; import { RouterModule } from '@angular/router'; import { ProductListComponent } from './product-list.component'; import { ProductDetailComponent } from './product-detail.component'; import { ProductEditComponent } from './product-edit/product-edit.component'; import { ProductEditInfoComponent } from './product-edit/product-edit-info.component'; import { ProductEditTagsComponent } from './product-edit/product-edit-tags.component'; import { ProductResolver } from './product-resolver.service'; import { ProductEditGuard } from './product-edit/product-edit.guard'; import { SharedModule } from '../shared/shared.module'; @NgModule({ imports: [ SharedModule, RouterModule.forChild([ {
{ path: '', component: ProductListComponent }, { path: ':id', component: ProductDetailComponent, resolve: { resolvedData: ProductResolver } }, { path: ':id/edit', component: ProductEditComponent, canDeactivate: [ProductEditGuard], resolve: { resolvedData: ProductResolver }, children: [ { path: '', redirectTo: 'info', pathMatch: 'full' }, { path: 'info', component: ProductEditInfoComponent }, { path: 'tags', component: ProductEditTagsComponent } ] } ] } ]) ], declarations: [ ProductListComponent, ProductDetailComponent, ProductEditComponent, ProductEditInfoComponent, ProductEditTagsComponent ] }) export class ProductModule {}
path: '', children: [
aula21.py
def teste():
s=10 n=4 print(f'no programa N vale {n}') print(f'no programa S vale {s}') teste()
global s print(f'na função teste S vale {s+2}') print(f'na função teste N vale {n+1}')
testMentalCapacity.js
'use strict'; const TestWrapper = require('test/util/TestWrapper'); const StartApply = require('app/steps/ui/screeners/startapply'); const StopPage = require('app/steps/ui/stoppage'); const testCommonContent = require('test/component/common/testCommonContent.js'); const commonContent = require('app/resources/en/translation/common'); const config = require('config'); const cookies = [{ name: config.redis.eligibilityCookie.name, content: { nextStepUrl: '/mental-capacity', pages: [ '/death-certificate', '/deceased-domicile', '/iht-completed', '/will-left', '/will-original', '/applicant-executor' ] } }]; describe('mental-capacity', () => { let testWrapper; const expectedNextUrlForStartApply = StartApply.getUrl(); const expectedNextUrlForStopPage = StopPage.getUrl('mentalCapacity'); beforeEach(() => { testWrapper = new TestWrapper('MentalCapacity'); }); afterEach(() => { testWrapper.destroy(); }); describe('Verify Content, Errors and Redirection', () => { testCommonContent.runTest('MentalCapacity', null, null, cookies); it('test content loaded on the page', (done) => { const contentData = { assessingMentalCapacity: config.links.assessingMentalCapacity }; testWrapper.testContent(done, contentData, [], cookies); }); it('test errors message displayed for missing data', (done) => { testWrapper.testErrors(done, {}, 'required', [], cookies); }); it(`test it redirects to next page: ${expectedNextUrlForStartApply}`, (done) => { const sessionData = { screeners: { deathCertificate: 'optionYes', domicile: 'optionYes', completed: 'optionYes', left: 'optionYes', original: 'optionYes', executor: 'optionYes'
testWrapper.agent.post('/prepare-session/form') .send(sessionData) .end(() => { const data = { mentalCapacity: 'optionYes' }; testWrapper.testRedirect(done, data, expectedNextUrlForStartApply, cookies); }); }); it(`test it redirects to stop page: ${expectedNextUrlForStopPage}`, (done) => { const sessionData = { screeners: { deathCertificate: 'optionYes', domicile: 'optionYes', completed: 'optionYes', left: 'optionYes', original: 'optionYes', executor: 'optionYes' } }; testWrapper.agent.post('/prepare-session/form') .send(sessionData) .end(() => { const data = { mentalCapacity: 'optionNo' }; testWrapper.testRedirect(done, data, expectedNextUrlForStopPage, cookies); }); }); it('test "save and close" link is not displayed on the page', (done) => { const playbackData = { saveAndClose: commonContent.saveAndClose }; testWrapper.testContentNotPresent(done, playbackData); }); }); });
} };
list-identity-pool-usage.rs
/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ use aws_sdk_cognitosync::{Client, Config, Error, Region, PKG_VERSION}; use aws_types::region; use aws_types::region::ProvideRegion; use structopt::StructOpt; #[derive(Debug, StructOpt)] struct Opt { /// The AWS Region. #[structopt(short, long)] region: Option<String>, /// Whether to display additional information. #[structopt(short, long)] verbose: bool, } /// Lists the identity pools registered with Amazon Cognito in the Region. /// # Arguments /// /// * `[-r REGION]` - The Region containing the identity pools. /// If not supplied, uses the value of the **AWS_REGION** environment variable. /// If the environment variable is not set, defaults to **us-west-2**. /// * `[-v]` - Whether to display additional information. #[tokio::main] async fn main() -> Result<(), Error> { tracing_subscriber::fmt::init(); let Opt { region, verbose } = Opt::from_args(); let region = region::ChainProvider::first_try(region.map(Region::new)) .or_default_provider() .or_else(Region::new("us-west-2")); println!(); if verbose { println!("Cognito client version: {}", PKG_VERSION); println!( "Region: {}", region.region().unwrap().as_ref() ); println!();
} let config = Config::builder().region(region).build(); let client = Client::from_conf(config); let response = client .list_identity_pool_usage() .max_results(10) .send() .await?; if let Some(pools) = response.identity_pool_usages { println!("Identity pools:"); for pool in pools { println!( " Identity pool ID: {}", pool.identity_pool_id.unwrap_or_default() ); println!( " Data storage: {}", pool.data_storage.unwrap_or_default() ); println!( " Sync sessions count: {}", pool.sync_sessions_count.unwrap_or_default() ); println!( " Last modified: {}", pool.last_modified_date.unwrap().to_chrono() ); println!(); } } println!("Next token: {:?}", response.next_token); Ok(()) }
signing.go
package gomason import ( "fmt" "github.com/pkg/errors" "log" "os" "os/exec" ) // It's a good default. You can install it anywhere. const defaultSigningProgram = "gpg" // SignBinary signs the given binary based on the entity and program given in metadata.json, possibly overridden by information in ~/.gomason func SignBinary(meta Metadata, binary string, verbose bool) (err error) { if verbose { log.Printf("Preparing to sign binary %s", binary) } // pull signing info out of metadata.json signInfo := meta.SignInfo signProg := signInfo.Program if signProg == "" { signProg = defaultSigningProgram } if verbose { log.Printf("Signing program is %s", signProg) } signEntity := signInfo.Email config, err := GetUserConfig() if err != nil { err = errors.Wrapf(err, "failed to get per-user config from ~/.gomason") } // email from .gomason overrides metadata if config.User.Email != "" { signEntity = config.User.Email } // program from .gomason overrides metadata if config.Signing.Program != "" { signProg = config.Signing.Program } if signEntity == "" { err = fmt.Errorf("Cannot sign without a signing entity (email).\n\nSet 'signing' section in metadata.json, or create ~/.gomason with the appropriate content.\n\nSee https://github.com/nikogura/gomason#config-reference for details.\n\n") return err } if verbose { log.Printf("Signing %s with identity %s.", binary, signEntity) } switch signProg { // insert other signing types here default: if verbose { log.Printf("Signing with default program.") } err = SignGPG(binary, signEntity, meta) if err != nil { err = errors.Wrap(err, fmt.Sprintf("failed to run %q", signProg)) return err } } return err } // VerifyBinary will verify the signature of a signed binary. func VerifyBinary(binary string, meta Metadata, verbose bool) (ok bool, err error) { // pull signing info out of metadata.json signInfo := meta.SignInfo signProg := signInfo.Program if signProg == "" {
signProg = defaultSigningProgram } switch signProg { // insert other signing types here default: if verbose { log.Printf("Verifying with default program.") } ok, err = VerifyGPG(binary, meta) if err != nil { err = errors.Wrap(err, fmt.Sprintf("failed to run %q", signProg)) return ok, err } } return ok, err } // SignGPG signs a given binary with GPG using the given signing entity. func SignGPG(binary string, signingEntity string, meta Metadata) (err error) { shellCmd, err := exec.LookPath("gpg") if err != nil { err = errors.Wrap(err, fmt.Sprintf("can't find signing program 'gpg' in path. Is it installed?")) return err } var cmd *exec.Cmd if keyring, ok := meta.Options["keyring"]; ok { // use a custom keyring for testing cmd = exec.Command(shellCmd, "--trustdb", meta.Options["trustdb"].(string), "--no-default-keyring", "--keyring", keyring.(string), "-bau", signingEntity, binary) } else { // gpg -bau <email address> <file> // -b detatch -a ascii armor -u specify user cmd = exec.Command(shellCmd, "-bau", signingEntity, binary) } cmd.Stdout = os.Stdout cmd.Stdin = os.Stdin cmd.Stderr = os.Stderr cmd.Env = os.Environ() err = cmd.Run() if err != nil { err = errors.Wrap(err, fmt.Sprintf("failed to run %q", shellCmd)) } return err } // VerifyGPG Verifies signatures with gpg. func VerifyGPG(binary string, meta Metadata) (ok bool, err error) { sigFile := fmt.Sprintf("%s.asc", binary) shellCmd, err := exec.LookPath("gpg") if err != nil { err = errors.Wrap(err, fmt.Sprintf("can't find signing program 'gpg' in path. Is it installed?")) return ok, err } var cmd *exec.Cmd if keyring, ok := meta.Options["keyring"]; ok { // use a custom keyring for testing cmd = exec.Command(shellCmd, "--trustdb", meta.Options["trustdb"].(string), "--no-default-keyring", "--keyring", keyring.(string), "--verify", sigFile) } else { // gpg --verify <file> cmd = exec.Command(shellCmd, "--verify", sigFile) } cmd.Stdout = os.Stdout cmd.Stdin = os.Stdin cmd.Stderr = os.Stderr cmd.Env = os.Environ() err = cmd.Run() if err != nil { log.Printf("Verification Error: %s", err) return ok, err } ok = true return ok, err }
dashboard.factory.js
(function() { 'use strict'; angular .module('app.dashboard') .factory('dashboard', dashboard); /* @ngInject */ function dashboard(Restangular, apiEndPoints, utilities) { var dashboardAPI = Restangular.all(apiEndPoints.dashboard.main); var notificationAPI = Restangular.all(apiEndPoints.notification.main); return { getCompleteDashboardReport: getCompleteDashboardReport, refreshCurrentDollarRate: refreshCurrentDollarRate, searchQueryPageTop: searchQueryPageTop, getNotificationCount: getNotificationCount, getUnReadNotificationList: getUnReadNotificationList }; function getNotificationCount(){ return notificationAPI.customGET(apiEndPoints.notification.unreadCount); } function getUnReadNotificationList(markRead){ return notificationAPI.customGET(apiEndPoints.notification.unreadList, { mark_as_read: markRead }); }
return dashboardAPI.customGET('',{ }); } function searchQueryPageTop(queryObj){ return dashboardAPI.customGET(apiEndPoints.dashboard.searchPageTop, queryObj); } function refreshCurrentDollarRate(){ } } })();
function getCompleteDashboardReport(){
msg_update_fee_target.rs
// This file is Copyright its original authors, visible in version control // history. // // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option. // You may not use this file except in accordance with one or both of these // licenses. // This file is auto-generated by gen_target.sh based on target_template.txt // To modify it, modify target_template.txt and run gen_target.sh instead. #![cfg_attr(feature = "libfuzzer_fuzz", no_main)] #[cfg(not(fuzzing))] compile_error!("Fuzz targets need cfg=fuzzing"); extern crate lightning_fuzz; use lightning_fuzz::msg_targets::msg_update_fee::*; #[cfg(feature = "afl")] #[macro_use] extern crate afl; #[cfg(feature = "afl")] fn main() { fuzz!(|data| { msg_update_fee_run(data.as_ptr(), data.len()); }); } #[cfg(feature = "honggfuzz")] #[macro_use] extern crate honggfuzz; #[cfg(feature = "honggfuzz")] fn main() { loop { fuzz!(|data| { msg_update_fee_run(data.as_ptr(), data.len()); }); } } #[cfg(feature = "libfuzzer_fuzz")] #[macro_use] extern crate libfuzzer_sys; #[cfg(feature = "libfuzzer_fuzz")] fuzz_target!(|data: &[u8]| { msg_update_fee_run(data.as_ptr(), data.len()); }); #[cfg(feature = "stdin_fuzz")] fn main() { use std::io::Read; let mut data = Vec::with_capacity(8192); std::io::stdin().read_to_end(&mut data).unwrap(); msg_update_fee_run(data.as_ptr(), data.len()); } #[test] fn
() { use std::fs; use std::io::Read; use lightning_fuzz::utils::test_logger::StringBuffer; use std::sync::{atomic, Arc}; { let data: Vec<u8> = vec![0]; msg_update_fee_run(data.as_ptr(), data.len()); } let mut threads = Vec::new(); let threads_running = Arc::new(atomic::AtomicUsize::new(0)); if let Ok(tests) = fs::read_dir("test_cases/msg_update_fee") { for test in tests { let mut data: Vec<u8> = Vec::new(); let path = test.unwrap().path(); fs::File::open(&path).unwrap().read_to_end(&mut data).unwrap(); threads_running.fetch_add(1, atomic::Ordering::AcqRel); let thread_count_ref = Arc::clone(&threads_running); let main_thread_ref = std::thread::current(); threads.push((path.file_name().unwrap().to_str().unwrap().to_string(), std::thread::spawn(move || { let string_logger = StringBuffer::new(); let panic_logger = string_logger.clone(); let res = if ::std::panic::catch_unwind(move || { msg_update_fee_test(&data, panic_logger); }).is_err() { Some(string_logger.into_string()) } else { None }; thread_count_ref.fetch_sub(1, atomic::Ordering::AcqRel); main_thread_ref.unpark(); res }) )); while threads_running.load(atomic::Ordering::Acquire) > 32 { std::thread::park(); } } } let mut failed_outputs = Vec::new(); for (test, thread) in threads.drain(..) { if let Some(output) = thread.join().unwrap() { println!("\nOutput of {}:\n{}\n", test, output); failed_outputs.push(test); } } if !failed_outputs.is_empty() { println!("Test cases which failed: "); for case in failed_outputs { println!("{}", case); } panic!(); } }
run_test_cases
lib.rs
#![deny(dead_code)] #![deny(unreachable_patterns)] #![deny(unused_extern_crates)] #![deny(unused_imports)] #![deny(unused_qualifications)] #![deny(clippy::all)] #![deny(missing_docs)] #![deny(missing_debug_implementations)] #![deny(unused_results)] #![deny(variant_size_differences)] //! A set of utilites used across crates. //! Note that these call some external commands: //! - `latexmk` (and by extension xelatex) //! - `pdftocairo` (only if required to convert a pdf image -- will gracefully fallback if not present) //! //! The following are not necessary for normal operation, //! but are useful in development: //! - `epubcheck` //! - `pdftotext` //! //! If used in combination with `bookbinder`, the following packages are needed for LaTex calls: //! //! -`titlesec` //! -`caption` //! -`geometry` //! -`ulem` //! -`textcase` //! -`xpatch` //! -`amsmath` //! -`amssymb` //! -`bookmark` //! -`booktabs` //! -`etoolbox` //! -`fancyhdr` //! -`fancyvrb` //! -`footnotehyper` //! -`listings` //! -`longtable` //! -`unicode-math` //! -`upquote` //! -`xcolor` //! -`xurl` //! -`fontspec` //! -`graphicx` //! -`microtype` //! -`hyperref` //! -`fmtcount` //! -`appendix` use std::borrow::Cow; use std::error::Error; use std::path::{Path, PathBuf}; use std::process::{Command, Stdio}; mod num_conversions; use std::io::Write; mod isbn; mod mimetypes; use aho_corasick::AhoCorasick; pub use isbn::{display_isbn, validate_isbn}; pub use mimetypes::{GuessMimeType, MimeType, MimeTypeHelper}; mod svg; use lazy_static::lazy_static; pub use svg::{convert_svg_file_to_png, convert_svg_to_jpg, convert_svg_to_png, simplify_svg}; use temp_file_name::HashToString; pub mod fonts; lazy_static! { static ref HTML_FINDER: AhoCorasick = AhoCorasick::new(&HTML_TARGET_CHARS); static ref LATEX_FINDER: AhoCorasick = AhoCorasick::new(&LATEX_TARGET_CHARS); } static HTML_TARGET_CHARS: [&str; 4] = ["<", ">", "&", "'"]; static HTML_REPLACEMENTS: [&str; 4] = ["&lt;", "&gt;", "&amp;", "’"]; /// escape `input` for html output pub fn escape_to_html<'a, S: Into<Cow<'a, str>>>(input: S) -> Cow<'a, str> { let input = input.into(); let input_bytes = input.as_bytes(); if HTML_FINDER.is_match(input_bytes) { let mut wtr = Vec::with_capacity(input.len()); HTML_FINDER .stream_replace_all(input_bytes, &mut wtr, &HTML_REPLACEMENTS) .expect("Aho-Corasick error"); unsafe { Cow::Owned(String::from_utf8_unchecked(wtr)) } } else { input } } static LATEX_TARGET_CHARS: [&str; 16] = [ "…", "–", "—", "\u{a0}", "&", "%", "$", "#", "_", "{", "}", "[", "]", "~", "^", "\\", ]; static LATEX_REPLACEMENTS: [&str; 16] = [ "\\ldots{}", "--", "---", "~", "\\&", r"\%", r"\$", r"\#", r"\_", r"\{", r"\}", r"{[}", r"{]}", r"\textasciitilde{}", r"\textasciicircum{}", r"\textbackslash{}", ]; /// escape `input` for latex output pub fn escape_to_latex<'a, S: Into<Cow<'a, str>>>(input: S) -> Cow<'a, str> { let input = input.into(); let input_bytes = input.as_bytes(); if LATEX_FINDER.is_match(input_bytes) { let mut wtr = Vec::with_capacity(input.len()); LATEX_FINDER .stream_replace_all(input_bytes, &mut wtr, &LATEX_REPLACEMENTS) .expect("Aho-Corasick error"); unsafe { Cow::Owned(String::from_utf8_unchecked(wtr)) } } else { input } } /// call lualatex on a particular str and return the pdf pub fn call_latex(tex: &str) -> Result<Vec<u8>, std::io::Error> { _call_latex(tex, false) } /// call lualatex on a particular str and return the pdf, /// displaying lualatex's output as it goes pub fn call_latex_verbose(tex: &str) -> Result<Vec<u8>, std::io::Error> { _call_latex(tex, true) } /// call 
a latex engine on a particular str and return the pdf fn _call_latex(tex: &str, verbose: bool) -> Result<Vec<u8>, std::io::Error> { let filename_base = tex.hash_to_string(); let mut outdir = std::env::temp_dir(); outdir = outdir.join("bookbinder"); let tex_fn = format!("{}.tex", &filename_base); let texpath = outdir.join(tex_fn); let filename = format!("{}.pdf", &filename_base); let outpath = outdir.join(&filename); std::fs::write(&texpath, tex)?; let odir_arg = format!("-output-directory={}", &outdir.to_string_lossy()); let mut ltx = if !verbose { Command::new("latexmk") .args(&[ &odir_arg, "-xelatex", "-interaction=batchmode", "-halt-on-error", texpath.to_string_lossy().as_ref(), ]) .spawn()? } else { Command::new("latexmk") .args(&[&odir_arg, "-xelatex", texpath.to_string_lossy().as_ref()]) .spawn()? }; let _ = ltx.wait()?; if !outpath.exists() { let mut log = texpath; let _ = log.set_extension("log"); let log = std::fs::read_to_string(log).unwrap_or_else(|_| { "Latex error without log generated; perhaps LaTeX is not installed?".to_string() }); let e = std::io::Error::new(std::io::ErrorKind::Other, log); return Err(e); } let o = std::fs::read(outpath)?; Ok(o) } /// Call `epubcheck` on a file to check that it is a valid epub pub fn epubcheck(p: PathBuf) -> Result<(), String> { let epubcheck = Command::new("epubcheck") .arg(p.to_str().unwrap()) .stdout(Stdio::piped()) .stderr(Stdio::piped()) .output() .map_err(|_| "Error launching epubcheck -- is it installed?".to_string())?; if epubcheck.status.success() { Ok(()) } else { let (stdout, stderr) = unsafe { let stdout = String::from_utf8_unchecked(epubcheck.stdout); let stderr = String::from_utf8_unchecked(epubcheck.stderr); (stdout, stderr) }; let mut msg = String::new(); msg.push_str(&stdout); msg.push_str(&stderr); Err(msg) } } /// Convert an image at path `filepath` to a jpeg; /// generally common raster formats as well as svg and pdf are supported, /// but note that eps files are not pub fn convert_to_jpg<P: AsRef<Path>>(filepath: P) -> Result<Vec<u8>, Box<dyn Error>> { let p = filepath.as_ref(); let ext = p.extension().map(|o| o.to_str()).flatten(); match ext { Some("pdf") => { let data = std::fs::read(p)?; let svg = convert_pdf_to_svg(&data, None)?; let jpg = convert_svg_to_jpg(&svg, None)?; Ok(jpg) } Some("svg") => { let svg = std::fs::read_to_string(p)?; let jpg = convert_svg_to_jpg(&svg, None)?; Ok(jpg) } _ => { let mut output = Vec::new(); let dynamic_image = image::open(p)?; dynamic_image.write_to(&mut output, image::ImageOutputFormat::Jpeg(100))?; Ok(output) } } } /// convert a pdf file to an svg; requires that pdftocairo (part of poppler) /// be installed. /// Note that we can't link poppler without licensing difficulties, so there are no plans /// to incorporate this as a dependency. 
pub fn convert_pdf_to_svg(pdf: &[u8], dpi: Option<usize>) -> Result<String, Box<dyn Error>> { let dpi = dpi.unwrap_or(150).to_string(); let mut cv = Command::new("pdftocairo") .args(&["-svg", "-origpagesizes", "-r", &dpi, "-", "-"]) .stdin(Stdio::piped()) .stdout(Stdio::piped()) .spawn()?; let stdin = cv.stdin.as_mut().unwrap(); stdin.write_all(&pdf)?; let o = cv.wait_with_output()?; let mut svg = String::from_utf8(o.stdout)?; // remove hardcoded widths svg = svg.replacen(r#"width="432pt""#, r#"width="100%""#, 1); svg = svg.replacen( r#"height="648pt""#, r#"height="100%" preserveAspectRatio="xMidYMid meet" x="0px" y="0px""#, 1, ); Ok(svg) } /// get the current year as a string pub fn get_current_year() -> String { let now = time::now_utc(); time::strftime("%Y", &now).unwrap() } /// given a number, return the corresponding letter /// e.g. 0 -> A, 1 -> B, 2 -> C. /// Returns an error if the number is greater than 25 /// ``` /// # use bookbinder_common::number_to_letter; /// let number = 1; /// assert_eq!(number_to_letter(number), Ok('B')); /// ``` pub const fn number_to_letter(n: u8) -> Result<char, ()> { if n > 25 { Err(()) } else { let codepoint = 65 + n; let letter = codepoint as char; Ok(letter) } } /// given a number, return it in roman format /// e.g. 1 -> I, 10 -> X, etc /// ``` /// # use bookbinder_common::number_to_roman; /// let number = 1; /// assert_eq!(number_to_roman(number), "I"); /// ``` pub const fn number_to_roman(n: u8) -> &'static str { num_conversions::number_to_roman(n) } /// given a number, return its equivalent in words /// ``` /// # use bookbinder_common::number_to_words; /// let number = 1; /// assert_eq!(number_to_words(number), "ONE"); /// ``` pub const fn number_to_words(n: u8) -> &'static str { num_conversions::number_to_words(n) } #[cfg(test)] mod tests { use super::*; #[test] fn test_numbers_to_letter() { assert_eq!(number_to_letter(0), Ok('A')); assert_eq!(number_to_letter(25), Ok('Z')); assert_eq!(number_to_letter(27), Err(())); } #[test] fn test_get
assert_eq!(get_current_year(), "2020".to_string()); } #[test] fn test_hash_to_string() { let s = "Hello world".hash_to_string(); assert_eq!(s, "2216321107127430384"); } #[test] fn test_latex_escapes() { let escapes = [ ("&", "\\&"), ("%", "\\%"), ("$", "\\$"), ("#", "\\#"), ("_", "\\_"), ("{Hello}", "\\{Hello\\}"), ("[Hello]", "{[}Hello{]}"), ("~", "\\textasciitilde{}"), ("^", "\\textasciicircum{}"), ("\\", "\\textbackslash{}"), //("'quoted'", "\\textquotesingle{}quoted\\textquotesingle{}"), //("\"doublequoted\"", "\\textquoteddbl{}doublequoted\\textquoteddbl{}"), //("`", "\\textasciigrave{}"), //("<>", "\\textless{}\\textgreater{}"), //("|", "\\textbar{}") ]; for (input, expected) in escapes.iter() { let s = input.to_string(); let out = escape_to_latex(&s); assert_eq!(out.to_string(), *expected); } } #[test] fn test_numbers_to_word() { assert_eq!(number_to_words(0), "ZERO"); assert_eq!(number_to_words(5), "FIVE"); assert_eq!(number_to_words(12), "TWELVE"); assert_eq!(number_to_words(25), "TWENTY-FIVE"); assert_eq!(number_to_words(125), "ONE HUNDRED AND TWENTY-FIVE"); } #[test] fn test_numbers_to_roman() { assert_eq!(number_to_roman(0), ""); assert_eq!(number_to_roman(1), "I"); } }
_current_year() {
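The escaping helpers in the crate above return `Cow<str>` so that inputs with nothing to escape are passed through without allocating. A short usage sketch, assuming the crate's public items (`escape_to_html`, `escape_to_latex`) are in scope; the input strings are made up for illustration:

```rust
use std::borrow::Cow;

fn main() {
    // No target characters: the original &str is borrowed, not copied.
    let clean = escape_to_html("plain text");
    assert!(matches!(clean, Cow::Borrowed(_)));

    // Target characters present: an escaped String is allocated.
    let escaped = escape_to_html("a < b && c");
    assert_eq!(escaped, "a &lt; b &amp;&amp; c");

    // The LaTeX variant follows the same pattern with its own replacement table.
    let tex = escape_to_latex("100% done");
    assert_eq!(tex, r"100\% done");
}
```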
PolygonPShape.pyde
""" PrimitivePShape. Using a PShape to display a custom polygon. """ def setup(): size(640, 360, P2D) smooth() # First create the shape. global star star = createShape() star.beginShape() # You can set fill and stroke. star.fill(102) star.stroke(255) star.strokeWeight(2) # Here, we are hardcoding a series of vertices. star.vertex(0, -50) star.vertex(14, -20) star.vertex(47, -15) star.vertex(23, 7) star.vertex(29, 40) star.vertex(0, 25) star.vertex(-29, 40) star.vertex(-23, 7) star.vertex(-47, -15) star.vertex(-14, -20) star.endShape(CLOSE) def draw():
background(51) # We can use translate to move the PShape. translate(mouseX, mouseY) # Display the shape. shape(star)
peers.go
/* Copyright IBM Corp. All Rights Reserved. SPDX-License-Identifier: Apache-2.0 */ package discovery import ( "encoding/json" "fmt" "io" "github.com/golang/protobuf/proto" "github.com/hyperledger/fabric-gm/cmd/common" "github.com/hyperledger/fabric-gm/discovery/client" "github.com/hyperledger/fabric-gm/protos/msp" "github.com/pkg/errors" ) // NewPeerCmd creates a new PeerCmd with the given Stub and ResponseParser func
(stub Stub, parser ResponseParser) *PeerCmd { return &PeerCmd{ stub: stub, parser: parser, } } // PeerCmd executes channelPeer listing command type PeerCmd struct { stub Stub server *string channel *string parser ResponseParser } // SetServer sets the server of the PeerCmd func (pc *PeerCmd) SetServer(server *string) { pc.server = server } // SetChannel sets the channel of the PeerCmd func (pc *PeerCmd) SetChannel(channel *string) { pc.channel = channel } // Execute executes the command func (pc *PeerCmd) Execute(conf common.Config) error { channel := "" if pc.channel != nil { channel = *pc.channel } if pc.server == nil || *pc.server == "" { return errors.New("no server specified") } server := *pc.server req := discovery.NewRequest() if channel != "" { req = req.OfChannel(channel) req = req.AddPeersQuery() } else { req = req.AddLocalPeersQuery() } res, err := pc.stub.Send(server, conf, req) if err != nil { return err } return pc.parser.ParseResponse(channel, res) } // PeerResponseParser parses a channelPeer response type PeerResponseParser struct { io.Writer } // ParseResponse parses the given response about the given channel func (parser *PeerResponseParser) ParseResponse(channel string, res ServiceResponse) error { var listPeers peerLister if channel == "" { listPeers = res.ForLocal() } else { listPeers = &simpleChannelResponse{res.ForChannel(channel)} } peers, err := listPeers.Peers() if err != nil { return err } channelState := channel != "" b, _ := json.MarshalIndent(assemblePeers(peers, channelState), "", "\t") fmt.Fprintln(parser.Writer, string(b)) return nil } func assemblePeers(peers []*discovery.Peer, withChannelState bool) interface{} { if withChannelState { var peerSlices []channelPeer for _, p := range peers { peerSlices = append(peerSlices, rawPeerToChannelPeer(p)) } return peerSlices } var peerSlices []localPeer for _, p := range peers { peerSlices = append(peerSlices, rawPeerToLocalPeer(p)) } return peerSlices } type channelPeer struct { MSPID string LedgerHeight uint64 Endpoint string Identity string Chaincodes []string } type localPeer struct { MSPID string Endpoint string Identity string } type peerLister interface { Peers() ([]*discovery.Peer, error) } type simpleChannelResponse struct { discovery.ChannelResponse } func (scr *simpleChannelResponse) Peers() ([]*discovery.Peer, error) { return scr.ChannelResponse.Peers() } func rawPeerToChannelPeer(p *discovery.Peer) channelPeer { var ledgerHeight uint64 var ccs []string if p.StateInfoMessage != nil && p.StateInfoMessage.GetStateInfo() != nil && p.StateInfoMessage.GetStateInfo().Properties != nil { properties := p.StateInfoMessage.GetStateInfo().Properties ledgerHeight = properties.LedgerHeight for _, cc := range properties.Chaincodes { if cc == nil { continue } ccs = append(ccs, cc.Name) } } var endpoint string if p.AliveMessage != nil && p.AliveMessage.GetAliveMsg() != nil && p.AliveMessage.GetAliveMsg().Membership != nil { endpoint = p.AliveMessage.GetAliveMsg().Membership.Endpoint } sID := &msp.SerializedIdentity{} proto.Unmarshal(p.Identity, sID) return channelPeer{ MSPID: p.MSPID, Endpoint: endpoint, LedgerHeight: ledgerHeight, Identity: string(sID.IdBytes), Chaincodes: ccs, } } func rawPeerToLocalPeer(p *discovery.Peer) localPeer { var endpoint string if p.AliveMessage != nil && p.AliveMessage.GetAliveMsg() != nil && p.AliveMessage.GetAliveMsg().Membership != nil { endpoint = p.AliveMessage.GetAliveMsg().Membership.Endpoint } sID := &msp.SerializedIdentity{} proto.Unmarshal(p.Identity, sID) return localPeer{ MSPID: 
p.MSPID, Endpoint: endpoint, Identity: string(sID.IdBytes), } }
NewPeerCmd
account.rs
use super::db_id_format; use chrono::NaiveDateTime; use diesel::{ self, pg::PgConnection, result::QueryResult, ExpressionMethods, QueryDsl, RunQueryDsl, }; use crate::bldr_core::metrics::CounterMetric; use crate::metrics::Counter; use crate::schema::account::{account_tokens, accounts}; #[derive(Debug, Identifiable, Serialize, Queryable)] pub struct Account { #[serde(with = "db_id_format")] pub id: i64, pub email: String, pub name: String, pub created_at: Option<NaiveDateTime>, pub updated_at: Option<NaiveDateTime>, } #[derive(Identifiable, Debug, Serialize, Queryable)] #[table_name = "account_tokens"] pub struct
{ #[serde(with = "db_id_format")] pub id: i64, #[serde(with = "db_id_format")] pub account_id: i64, pub token: String, pub created_at: Option<NaiveDateTime>, } #[derive(Insertable)] #[table_name = "accounts"] pub struct NewAccount<'a> { pub email: &'a str, pub name: &'a str, } impl Account { pub fn get(name: &str, conn: &PgConnection) -> QueryResult<Account> { Counter::DBCall.increment(); accounts::table .filter(accounts::name.eq(name)) .get_result(conn) } pub fn get_by_id(id: i64, conn: &PgConnection) -> QueryResult<Account> { Counter::DBCall.increment(); accounts::table.find(id).get_result(conn) } pub fn create(account: &NewAccount, conn: &PgConnection) -> QueryResult<Account> { Counter::DBCall.increment(); diesel::insert_into(accounts::table) .values(account) .get_result(conn) } pub fn find_or_create(account: &NewAccount, conn: &PgConnection) -> QueryResult<Account> { Counter::DBCall.increment(); match diesel::insert_into(accounts::table) .values(account) .on_conflict(accounts::name) .do_nothing() .get_result(conn) { Ok(account) => Ok(account), Err(_) => accounts::table .filter(accounts::name.eq(account.name)) .get_result(conn), } } pub fn update(id: u64, email: &str, conn: &PgConnection) -> QueryResult<usize> { Counter::DBCall.increment(); diesel::update(accounts::table.find(id as i64)) .set(accounts::email.eq(email)) .execute(conn) } } #[derive(Insertable)] #[table_name = "account_tokens"] pub struct NewAccountToken<'a> { pub account_id: i64, pub token: &'a str, } impl AccountToken { pub fn list(account_id: u64, conn: &PgConnection) -> QueryResult<Vec<AccountToken>> { Counter::DBCall.increment(); account_tokens::table .filter(account_tokens::account_id.eq(account_id as i64)) .get_results(conn) } pub fn create(req: &NewAccountToken, conn: &PgConnection) -> QueryResult<AccountToken> { Counter::DBCall.increment(); diesel::insert_into(account_tokens::table) .values(req) .on_conflict(account_tokens::account_id) .do_update() .set(account_tokens::token.eq(req.token)) .get_result(conn) } pub fn delete(id: u64, conn: &PgConnection) -> QueryResult<usize> { Counter::DBCall.increment(); diesel::delete(account_tokens::table.find(id as i64)).execute(conn) } }
AccountToken
translate.rs
use proc_macro2::{Group, Ident, Span, TokenStream, TokenTree}; use std::collections::{HashMap, HashSet}; use quote::{format_ident, quote, quote_spanned}; pub use self::Expr::*; use crate::analysis; use crate::ast::*; pub fn report_error(span: Span, msg: String) -> TokenStream { quote_spanned!(span=>compile_error!(#msg);) } pub fn report_error_expr(span: Span, msg: String) -> TokenStream { // panic!() to avoid "Mismatched types" error quote_spanned!(span=> { compile_error!(#msg); panic!() }) } /// Test if the group begins with a specific marker character, and if so, return the remaining tokens. fn group_check_prefix(group: &Group, prefix: char) -> Option<TokenStream> { let mut iter = group.stream().into_iter(); match iter.next() { Some(TokenTree::Punct(p)) if p.as_char() == prefix => { Some(iter.collect()) } _ => None } } fn extra_args_def(grammar: &Grammar) -> TokenStream { let args: Vec<TokenStream> = grammar .args .iter() .map(|&(ref name, ref tp)| quote!(, #name: #tp)) .collect(); quote!(#(#args)*) } fn extra_args_call(grammar: &Grammar) -> TokenStream { let args: Vec<TokenStream> = grammar .args .iter() .map(|&(ref name, _)| quote!(, #name)) .collect(); quote!(#(#args)*) } #[derive(Clone)] struct Context<'a> { rules: &'a HashMap<String, &'a Rule>, rules_from_args: HashSet<String>, extra_args_call: TokenStream, extra_args_def: TokenStream, } pub(crate) fn compile_grammar(grammar: &Grammar) -> TokenStream { let name = &grammar.name; let mut items = vec![make_parse_state(&grammar)]; let analysis = analysis::check(&grammar); let context = &Context { rules: &analysis.rules, rules_from_args: HashSet::new(), extra_args_call: extra_args_call(grammar), extra_args_def: extra_args_def(grammar), }; let mut seen_rule_names = HashSet::new(); for item in &grammar.items { match item { Item::Use(tt) => items.push(tt.clone()), Item::Rule(rule) => { if seen_rule_names.insert(rule.name.to_string()) { if rule.cached && !(rule.params.is_empty() && rule.ty_params.is_none()) { items.push(report_error( rule.name.span(), format!("rules with arguments cannot use #[cache]"), )); } if rule.visibility.is_some() { for param in &rule.params { match &param.ty { RuleParamTy::Rule(..) => items.push(report_error( param.name.span(), format!("parameters on `pub rule` must be Rust types"), )), _ => {} } } items.push(compile_rule_export(context, rule)); } items.push(compile_rule(context, rule)); } else { items.push(report_error( rule.name.span(), format!("duplicate rule `{}`", rule.name), )); } } } } let doc = &grammar.doc; let input_type = &grammar.input_type; let visibility = &grammar.visibility; let mut errors = Vec::new(); for rec in &analysis.left_recursion { errors.push(report_error(rec.span, rec.msg())); } quote! { #doc #visibility mod #name { #[allow(unused_imports)] use super::*; type Input = #input_type; type PositionRepr = <Input as ::peg::Parse>::PositionRepr; #(#errors)* #(#items)* } } } fn make_parse_state(grammar: &Grammar) -> TokenStream { let mut cache_fields_def: Vec<TokenStream> = Vec::new(); let mut cache_fields: Vec<Ident> = Vec::new(); for rule in grammar.iter_rules() { if rule.cached { let name = format_ident!("{}_cache", rule.name); let ret_ty = rule.ret_type.clone().unwrap_or_else(|| quote!(())); cache_fields_def.push( quote! { #name: ::std::collections::HashMap<usize, ::peg::RuleResult<#ret_ty>> }, ); cache_fields.push(name); } } quote! 
{ struct ParseState<'input> { _phantom: ::std::marker::PhantomData<&'input ()>, #(#cache_fields_def),* } impl<'input> ParseState<'input> { fn new() -> ParseState<'input> { ParseState { _phantom: ::std::marker::PhantomData, #(#cache_fields: ::std::collections::HashMap::new()),* } } } } } fn rule_params_list(rule: &Rule) -> Vec<TokenStream> { rule.params.iter().map(|param| { let name = &param.name; match &param.ty { RuleParamTy::Rust(ty) => quote!{ #name: #ty }, RuleParamTy::Rule(ty) => quote!{ #name: impl Fn(&'input Input, &mut ParseState<'input>, &mut ::peg::error::ErrorState, usize) -> ::peg::RuleResult<#ty> }, } }).collect() } fn compile_rule(context: &Context, rule: &Rule) -> TokenStream { let ref rule_name = rule.name; let name = format_ident!("__parse_{}", rule.name); let ret_ty = rule.ret_type.clone().unwrap_or_else(|| quote!(())); let result_used = rule.ret_type.is_some(); let ty_params = rule.ty_params.as_ref().map(|x| &x[..]).unwrap_or(&[]); let mut context = context.clone(); context .rules_from_args .extend(rule.params.iter().map(|param| param.name.to_string())); let body = compile_expr(&context, &rule.expr, result_used); let wrapped_body = if cfg!(feature = "trace") { let str_rule_name = rule_name.to_string(); quote! {{ let loc = ::peg::Parse::position_repr(__input, __pos); println!("[PEG_TRACE] Attempting to match rule `{}` at {}", #str_rule_name, loc); let __peg_result: ::peg::RuleResult<#ret_ty> = {#body}; match __peg_result { ::peg::RuleResult::Matched(epos, v) => { let eloc = ::peg::Parse::position_repr(__input, epos); println!("[PEG_TRACE] Matched rule `{}` at {} to {}", #str_rule_name, loc, eloc); ::peg::RuleResult::Matched(epos, v) } ::peg::RuleResult::Failed => { println!("[PEG_TRACE] Failed to match rule `{}` at {}", #str_rule_name, loc); ::peg::RuleResult::Failed } } }} } else { body }; let extra_args_def = &context.extra_args_def; let rule_params = rule_params_list(rule); if rule.cached { let cache_field = format_ident!("{}_cache", rule.name); let cache_trace = if cfg!(feature = "trace") { let str_rule_name = rule.name.to_string(); quote! { let loc = ::peg::Parse::position_repr(__input, __pos); match &entry { &::peg::RuleResult::Matched(..) => println!("[PEG_TRACE] Cached match of rule {} at {}", #str_rule_name, loc), &Failed => println!("[PEG_TRACE] Cached fail of rule {} at {}", #str_rule_name, loc), }; } } else { quote!() }; quote! { fn #name<'input #(, #ty_params)*>(__input: &'input Input, __state: &mut ParseState<'input>, __err_state: &mut ::peg::error::ErrorState, __pos: usize #extra_args_def #(, #rule_params)*) -> ::peg::RuleResult<#ret_ty> { #![allow(non_snake_case, unused)] if let Some(entry) = __state.#cache_field.get(&__pos) { #cache_trace return entry.clone(); } let __rule_result = #wrapped_body; __state.#cache_field.insert(__pos, __rule_result.clone()); __rule_result } } } else { quote! 
{ fn #name<'input #(, #ty_params)*>(__input: &'input Input, __state: &mut ParseState<'input>, __err_state: &mut ::peg::error::ErrorState, __pos: usize #extra_args_def #(, #rule_params)*) -> ::peg::RuleResult<#ret_ty> { #![allow(non_snake_case, unused)] #wrapped_body } } } } fn compile_rule_export(context: &Context, rule: &Rule) -> TokenStream { let doc = &rule.doc; let name = &rule.name; let ret_ty = rule.ret_type.clone().unwrap_or_else(|| quote!(())); let visibility = &rule.visibility; let parse_fn = format_ident!("__parse_{}", rule.name.to_string(), span = name.span()); let ty_params = rule.ty_params.as_ref().map(|x| &x[..]).unwrap_or(&[]); let rule_params = rule_params_list(rule); let rule_params_call: Vec<TokenStream> = rule .params .iter() .map(|param| { let param_name = &param.name; quote!(#param_name) }) .collect(); let extra_args_def = &context.extra_args_def; let extra_args_call = &context.extra_args_call; quote! { #doc #visibility fn #name<'input #(, #ty_params)*>(__input: &'input Input #extra_args_def #(, #rule_params)*) -> ::std::result::Result<#ret_ty, ::peg::error::ParseError<PositionRepr>> { #![allow(non_snake_case, unused)] let mut __err_state = ::peg::error::ErrorState::new(::peg::Parse::start(__input)); let mut __state = ParseState::new(); match #parse_fn(__input, &mut __state, &mut __err_state, ::peg::Parse::start(__input) #extra_args_call #(, #rule_params_call)*) { ::peg::RuleResult::Matched(__pos, __value) => { if ::peg::Parse::is_eof(__input, __pos) { return Ok(__value) } else { __err_state.mark_failure(__pos, "EOF"); } } _ => () } __state = ParseState::new(); __err_state.reparse_for_error(); match #parse_fn(__input, &mut __state, &mut __err_state, ::peg::Parse::start(__input) #extra_args_call #(, #rule_params_call)*) { ::peg::RuleResult::Matched(__pos, __value) => { if ::peg::Parse::is_eof(__input, __pos) { panic!("Parser is nondeterministic: succeeded when reparsing for error position"); } else { __err_state.mark_failure(__pos, "EOF"); } } _ => () } Err(__err_state.into_parse_error(__input)) } } } fn name_or_ignore(n: Option<&Ident>) -> TokenStream { match n { Some(n) => quote!(#n), None => quote!(_), } } fn ordered_choice(mut rs: impl DoubleEndedIterator<Item = TokenStream>) -> TokenStream { rs.next_back().map(|last| rs.rfold(last, |fallback, preferred| { quote! {{ let __choice_res = #preferred; match __choice_res { ::peg::RuleResult::Matched(__pos, __value) => ::peg::RuleResult::Matched(__pos, __value), ::peg::RuleResult::Failed => #fallback } }} })).expect("ordered choice must not be empty") } fn labeled_seq(context: &Context, exprs: &[TaggedExpr], inner: TokenStream) -> TokenStream { exprs.iter().rfold(inner, |then, expr| { let value_name = expr.name.as_ref(); let name_pat = name_or_ignore(value_name); let seq_res = compile_expr(context, &expr.expr, value_name.is_some()); quote! {{ let __seq_res = #seq_res; match __seq_res { ::peg::RuleResult::Matched(__pos, #name_pat) => { #then } ::peg::RuleResult::Failed => ::peg::RuleResult::Failed, } }} }) } fn cond_swap<T>(swap: bool, tup: (T, T)) -> (T, T) { let (a, b) = tup; if swap { (b, a) } else { (a, b) } } fn compile_expr(context: &Context, e: &Expr, result_used: bool) -> TokenStream { match e { LiteralExpr(ref s) => { let escaped_str = s.to_string(); quote! 
{ match ::peg::ParseLiteral::parse_string_literal(__input, __pos, #s) { ::peg::RuleResult::Matched(__pos, __val) => ::peg::RuleResult::Matched(__pos, __val), ::peg::RuleResult::Failed => __err_state.mark_failure(__pos, #escaped_str) }} } PatternExpr(ref pattern_group) => { let invert = false; let pattern = pattern_group.stream(); let pat_str = pattern.to_string(); let (in_set, not_in_set) = cond_swap( invert, ( quote! { ::peg::RuleResult::Matched(__next, ()) }, quote! { __err_state.mark_failure(__pos, #pat_str) }, ), ); let in_set_arm = quote!( #pattern => #in_set, ); quote! { match ::peg::ParseElem::parse_elem(__input, __pos) { ::peg::RuleResult::Matched(__next, __ch) => match __ch { #in_set_arm _ => #not_in_set, } ::peg::RuleResult::Failed => __err_state.mark_failure(__pos, #pat_str) } } } RuleExpr(ref rule_name, ref rule_args) if context.rules_from_args.contains(&rule_name.to_string()) => { if !rule_args.is_empty() { return report_error_expr( rule_name.span(), format!("rule closure does not accept arguments"), ); } quote! { #rule_name(__input, __state, __err_state, __pos) } } RuleExpr(ref rule_name, ref rule_args) => { let rule_name_str = rule_name.to_string(); let rule_def = if let Some(rule_def) = context.rules.get(&rule_name_str) { rule_def } else { return report_error_expr( rule_name.span(), format!("undefined rule `{}`", rule_name_str), ); }; if result_used && rule_def.ret_type.is_none() { let msg = format!( "using result of rule `{}`, which does not return a value", rule_name_str ); return report_error_expr(rule_name.span(), msg); } if rule_def.params.len() != rule_args.len() { return report_error_expr( rule_name.span(), format!( "this rule takes {} parameters but {} parameters were supplied", rule_def.params.len(), rule_args.len() ), ); } for (param, arg) in rule_def.params.iter().zip(rule_args.iter()) { match (&param.ty, &arg) { (RuleParamTy::Rust(..), RuleArg::Peg(..)) => { return report_error_expr( rule_name.span(), format!( "parameter `{}` expects a value, but a PEG expression was passed", param.name ), ); } (RuleParamTy::Rule(..), RuleArg::Rust(..)) => { return report_error_expr( rule_name.span(), format!( "parameter `{}` expects a PEG expression, but a value was passed", param.name ), ); } (RuleParamTy::Rule(..), RuleArg::Peg(..)) => (), (RuleParamTy::Rust(..), RuleArg::Rust(..)) => (), } } let func = format_ident!("__parse_{}", rule_name, span = rule_name.span()); let extra_args_call = &context.extra_args_call; let rule_args_call: Vec<TokenStream> = rule_args .iter() .map(|arg| match arg { RuleArg::Peg(e) => { let expr = compile_expr(context, e, true); quote! { |__input, __state, __err_state, __pos| { #expr } } } RuleArg::Rust(e) => e.clone(), }) .collect(); if result_used { quote! { #func(__input, __state, __err_state, __pos #extra_args_call #(, #rule_args_call)*) } } else { quote! { match #func(__input, __state, __err_state, __pos #extra_args_call #(, #rule_args_call)*){ ::peg::RuleResult::Matched(pos, _) => ::peg::RuleResult::Matched(pos, ()), ::peg::RuleResult::Failed => ::peg::RuleResult::Failed, } } } } MethodExpr(ref method, ref args) => { quote! { __input.#method(__pos, #args) } } ChoiceExpr(ref exprs) => ordered_choice( exprs .iter() .map(|expr| compile_expr(context, expr, result_used)), ), OptionalExpr(ref e) => { let optional_res = compile_expr(context, e, result_used); if result_used { quote! 
{ match #optional_res { ::peg::RuleResult::Matched(__newpos, __value) => { ::peg::RuleResult::Matched(__newpos, Some(__value)) }, ::peg::RuleResult::Failed => { ::peg::RuleResult::Matched(__pos, None) }, } } } else { quote! { match #optional_res { ::peg::RuleResult::Matched(__newpos, _) => { ::peg::RuleResult::Matched(__newpos, ()) }, ::peg::RuleResult::Failed => { ::peg::RuleResult::Matched(__pos, ()) }, } } } } Repeat(ref e, ref bounds, ref sep) => { let inner = compile_expr(context, e, result_used); let (min, max) = match *bounds { BoundedRepeat::None => (None, None), BoundedRepeat::Plus => (Some(quote!(1)), None), BoundedRepeat::Exact(ref code) => (Some(code.clone()), Some(code.clone())), BoundedRepeat::Both(ref min, ref max) => (min.clone(), max.clone()), }; let match_sep = if let Some(sep) = sep { let sep_inner = compile_expr(context, sep, false); quote! { let __pos = if __repeat_value.is_empty() { __pos } else { let __sep_res = #sep_inner; match __sep_res { ::peg::RuleResult::Matched(__newpos, _) => { __newpos }, ::peg::RuleResult::Failed => break, } }; } } else { quote!() }; let result = if result_used { quote!(__repeat_value) } else { quote!(()) }; let (repeat_vec, repeat_step) = if result_used || min.is_some() || max.is_some() || sep.is_some() { ( Some(quote! { let mut __repeat_value = vec!(); }), Some(quote! { __repeat_value.push(__value); }), ) } else { (None, None) }; let max_check = max.map(|max| { quote! { if __repeat_value.len() >= #max { break } } }); let result_check = if let Some(min) = min { quote! { if __repeat_value.len() >= #min { ::peg::RuleResult::Matched(__repeat_pos, #result) } else { ::peg::RuleResult::Failed } } } else { quote! { ::peg::RuleResult::Matched(__repeat_pos, #result) } }; quote! {{ let mut __repeat_pos = __pos; #repeat_vec loop { let __pos = __repeat_pos; #match_sep #max_check let __step_res = #inner; match __step_res { ::peg::RuleResult::Matched(__newpos, __value) => { __repeat_pos = __newpos; #repeat_step }, ::peg::RuleResult::Failed => { break; } } } #result_check }} } PosAssertExpr(ref e) => { let assert_res = compile_expr(context, e, result_used); quote! {{ __err_state.suppress_fail += 1; let __assert_res = #assert_res; __err_state.suppress_fail -= 1; match __assert_res { ::peg::RuleResult::Matched(_, __value) => ::peg::RuleResult::Matched(__pos, __value), ::peg::RuleResult::Failed => ::peg::RuleResult::Failed, } }} } NegAssertExpr(ref e) => { let assert_res = compile_expr(context, e, false); quote! {{ __err_state.suppress_fail += 1; let __assert_res = #assert_res; __err_state.suppress_fail -= 1; match __assert_res { ::peg::RuleResult::Failed => ::peg::RuleResult::Matched(__pos, ()), ::peg::RuleResult::Matched(..) => ::peg::RuleResult::Failed, } }} } ActionExpr(ref exprs, ref code) => labeled_seq(context, &exprs, { if let Some(code) = code { // Peek and see if the first token in the block is '?'. If so, it's a conditional block if let Some(body) = group_check_prefix(&code, '?') { quote_spanned!{code.span() => match (||{ #body })() { Ok(res) => ::peg::RuleResult::Matched(__pos, res), Err(expected) => { __err_state.mark_failure(__pos, expected); ::peg::RuleResult::Failed }, } } } else { quote_spanned!{code.span() => ::peg::RuleResult::Matched(__pos, (||#code)()) } } } else { quote!(::peg::RuleResult::Matched(__pos, ())) } }), MatchStrExpr(ref expr) => { let inner = compile_expr(context, expr, false); quote! 
{{ let str_start = __pos; match #inner { ::peg::RuleResult::Matched(__newpos, _) => { ::peg::RuleResult::Matched(__newpos, ::peg::ParseSlice::parse_slice(__input, str_start, __newpos)) }, ::peg::RuleResult::Failed => ::peg::RuleResult::Failed, } }} } PositionExpr => { quote! { ::peg::RuleResult::Matched(__pos, __pos) } } QuietExpr(ref expr) =>
FailExpr(ref expected) => { quote! {{ __err_state.mark_failure(__pos, #expected); ::peg::RuleResult::Failed }} } PrecedenceExpr { ref levels } => { let mut pre_rules = Vec::new(); let mut level_code = Vec::new(); let mut span_capture: Option<(TokenStream, TokenStream, TokenStream, &Group)> = None; for (prec, level) in levels.iter().enumerate() { let prec = prec as i32; let mut post_rules = Vec::new(); for op in &level.operators { if op.elements.len() < 1 { return quote!(compile_error!("incomplete rule")); } let left_arg = &op.elements[0]; let l_arg = name_or_ignore(left_arg.name.as_ref()); let right_arg = &op.elements[op.elements.len() - 1]; let r_arg = name_or_ignore(right_arg.name.as_ref()); let action = &op.action; let action = quote_spanned!(op.action.span()=>(||#action)()); let action = if let Some((lpos_name, val_name, rpos_name, wrap_action)) = &span_capture { quote_spanned!(wrap_action.span()=> (|#lpos_name, #val_name, #rpos_name|#wrap_action)(__lpos, #action, __pos)) } else { action }; match (&left_arg.expr, &right_arg.expr) { (&PositionExpr, &PositionExpr) if op.elements.len() == 3 => { // wrapper rule to capture expression span match &op.elements[1].expr { &MarkerExpr(..) => (), _ => { return quote!(compile_error!( "span capture rule must be `l:position!() n:@ r:position!()" )) } } span_capture = Some(( name_or_ignore(op.elements[0].name.as_ref()), name_or_ignore(op.elements[1].name.as_ref()), name_or_ignore(op.elements[2].name.as_ref()), &op.action, )); } (&MarkerExpr(la), &MarkerExpr(ra)) if op.elements.len() >= 3 => { //infix let new_prec = match (la, ra) { (true, false) => prec + 1, // left associative (false, true) => prec, // right associative _ => return quote!(compile_error!("precedence rules must use `@` and `(@)` to indicate associativity")) }; post_rules.push( labeled_seq(context, &op.elements[1..op.elements.len()-1], { quote!{ if let ::peg::RuleResult::Matched(__pos, #r_arg) = __recurse(__pos, #new_prec, __state, __err_state) { let #l_arg = __infix_result; __infix_result = #action; ::peg::RuleResult::Matched(__pos, ()) } else { ::peg::RuleResult::Failed } } }) ); } (&MarkerExpr(_), _) if op.elements.len() >= 2 => { // postfix post_rules.push(labeled_seq( context, &op.elements[1..op.elements.len()], { quote! { let #l_arg = __infix_result; __infix_result = #action; ::peg::RuleResult::Matched(__pos, ()) } }, )); } (_, &MarkerExpr(a)) if op.elements.len() >= 2 => { // prefix let new_prec = match a { true => prec, false => prec + 1, }; pre_rules.push( labeled_seq(context, &op.elements[..op.elements.len()-1], { quote!{ if let ::peg::RuleResult::Matched(__pos, #r_arg) = __recurse(__pos, #new_prec, __state, __err_state) { ::peg::RuleResult::Matched(__pos, #action) } else { ::peg::RuleResult::Failed } } }) ); } _ => { // atom pre_rules.push(labeled_seq(context, &op.elements, { quote! { ::peg::RuleResult::Matched(__pos, #action) } })); } }; } if !post_rules.is_empty() { level_code.push(quote! { if #prec >= __min_prec { #( if let ::peg::RuleResult::Matched(__pos, ()) = #post_rules { return (__infix_result, ::peg::RuleResult::Matched(__pos, ())); } )* } }); } } let (enter, leave) = if cfg!(feature = "trace") { ( quote! {println!("[PEG_TRACE] Entering level {}", min_prec);}, quote! {println!("[PEG_TRACE] Leaving level {}", min_prec);}, ) } else { (quote!(), quote!()) }; // The closures below must be defined within the function call to which they are passed // due to https://github.com/rust-lang/rust/issues/41078 quote! 
{{ fn __infix_parse<T, S>( state: &mut S, err_state: &mut ::peg::error::ErrorState, min_prec: i32, lpos: usize, prefix_atom: &Fn(usize, &mut S, &mut ::peg::error::ErrorState, &Fn(usize, i32, &mut S, &mut ::peg::error::ErrorState) -> ::peg::RuleResult<T>) -> ::peg::RuleResult<T>, level_code: &Fn(usize, usize, i32, T, &mut S, &mut ::peg::error::ErrorState, &Fn(usize, i32, &mut S, &mut ::peg::error::ErrorState) -> ::peg::RuleResult<T>) -> (T, ::peg::RuleResult<()>), ) -> ::peg::RuleResult<T> { let initial = { prefix_atom(lpos, state, err_state, &|pos, min_prec, state, err_state| { __infix_parse(state, err_state, min_prec, pos, prefix_atom, level_code) }) }; if let ::peg::RuleResult::Matched(pos, mut infix_result) = initial { #enter let mut repeat_pos = pos; loop { let (val, res) = level_code( repeat_pos, lpos, min_prec, infix_result, state, err_state, &|pos, min_prec, state, err_state| { __infix_parse(state, err_state, min_prec, pos, prefix_atom, level_code) } ); infix_result = val; if let ::peg::RuleResult::Matched(pos, ()) = res { repeat_pos = pos; continue; } break; } #leave ::peg::RuleResult::Matched(repeat_pos, infix_result) } else { ::peg::RuleResult::Failed } } __infix_parse(__state, __err_state, 0, __pos, &|__pos, __state, __err_state, __recurse| { let __lpos = __pos; #( if let ::peg::RuleResult::Matched(__pos, __v) = #pre_rules { return ::peg::RuleResult::Matched(__pos, __v); } )* ::peg::RuleResult::Failed }, &|__pos, __lpos, __min_prec, mut __infix_result, __state, __err_state, __recurse| { #(#level_code)* (__infix_result, ::peg::RuleResult::Failed) } ) }} } MarkerExpr { .. } => { return quote!(compile_error!("`@` is only allowed in `precedence!{}`")); } } }
{ let inner = compile_expr(context, expr, result_used); quote! {{ __err_state.suppress_fail += 1; let res = #inner; __err_state.suppress_fail -= 1; res }} }
main.go
package main import ( "github.com/astaxie/beego" "github.com/wangxianzhuo/math-util/controllers" ) func main() { beego.Router("/det/value", &controllers.DetController{}, "get:Value") beego.Router("/det/expanse", &controllers.DetController{}, "get:Expanse") beego.Router("/equations/solve", &controllers.EquationsController{}, "get:Solve") beego.Router("/util/inversionnumber", &controllers.UtilController{}, "get:InversionNumber") beego.ErrorController(&ErrorController{}) beego.SetLogFuncCall(true) beego.Run() } type ErrorController struct { beego.Controller } type ErrMessage struct { Message string }
e.ServeJSON() } func (e ErrorController) Error404() { e.Data["json"] = ErrMessage{ Message: "not found", } e.ServeJSON() }
func (e ErrorController) Error500() { e.Data["json"] = ErrMessage{ Message: "internal server error", }
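The beego router lines in main.go rely on name-based method dispatch: the string "get:Value" maps HTTP GET to a method named Value on DetController. The controllers package itself is not included in this dump, so the sketch below is only a guess at the shape such a controller could take; the DetController fields, the "matrix" query parameter, and the JSON response body are assumptions for illustration, not the project's actual code.

package controllers

import "github.com/astaxie/beego"

// Hypothetical sketch: the real DetController is not shown in this dump.
// It only illustrates beego's "get:Value" method-name routing convention.
type DetController struct {
	beego.Controller
}

// Value would serve GET /det/value. The "matrix" query parameter and the
// response shape below are made up for the example.
func (c *DetController) Value() {
	matrix := c.GetString("matrix")
	c.Data["json"] = map[string]string{
		"input": matrix,
		"value": "not computed in this sketch",
	}
	c.ServeJSON()
}

With a controller like this registered, the ErrorController defined above still responds with the JSON bodies shown for 404 and 500 errors.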
content.ts
import { Router } from 'express'; import { param } from 'express-validator'; import { Pool } from 'mysql2/promise'; import { ContentController } from '../controllers'; import { catchExceptions } from '../lib/utils'; const router = Router(); // for /content/... export function
(pool: Pool) { const controller = new ContentController(pool); router.get( '/', catchExceptions(controller.view) ); router.get( '/links/:name', [param('name').not().isEmpty().trim().escape()], catchExceptions(controller.getLinksByType) ); router.get( '/:id', [param('id').not().isEmpty().trim().escape()], catchExceptions(controller.viewById) ); return router; }
contentRouter
App.js
import "./App.css"; import { withStyles } from "@material-ui/core/styles"; import AppBar from "@material-ui/core/AppBar"; import Button from "@material-ui/core/Button"; import IconButton from "@material-ui/core/IconButton"; import MenuIcon from "@material-ui/icons/Menu"; import PropTypes from "prop-types"; import React, { Component } from "react"; import Toolbar from "@material-ui/core/Toolbar";
import Typography from "@material-ui/core/Typography"; const styles = { root: { flexGrow: 1 }, grow: { flexGrow: 1 }, menuButton: { marginLeft: -12, marginRight: 20 } }; class App extends Component { render() { const { classes } = this.props; return ( <div className={classes.root}> <AppBar position="static"> <Toolbar> <IconButton className={classes.menuButton} color="inherit" aria-label="Menu" > <MenuIcon /> </IconButton> <Typography variant="h6" color="inherit" className={classes.grow}> News </Typography> <Button color="inherit">Login</Button> </Toolbar> </AppBar> </div> ); } } App.propTypes = { classes: PropTypes.object.isRequired }; export default withStyles(styles)(App);
demo_test.go
package baremetalhost import ( goctx "context" "testing" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes/scheme" fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/reconcile" logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" metalkubeapis "github.com/metal3-io/baremetal-operator/pkg/apis" metalkubev1alpha1 "github.com/metal3-io/baremetal-operator/pkg/apis/metalkube/v1alpha1" "github.com/metal3-io/baremetal-operator/pkg/provisioner/demo" ) func init() { logf.SetLogger(logf.ZapLogger(true)) // Register our package types with the global scheme metalkubeapis.AddToScheme(scheme.Scheme) } func newDemoReconciler(initObjs ...runtime.Object) *ReconcileBareMetalHost { c := fakeclient.NewFakeClient(initObjs...) // Add a default secret that can be used by most hosts. c.Create(goctx.TODO(), newSecret(defaultSecretName, "User", "Pass")) return &ReconcileBareMetalHost{ client: c, scheme: scheme.Scheme, provisionerFactory: demo.New, } } // TestDemoRegistrationError tests that a host with the right name reports // a registration error func TestDemoRegistrationError(t *testing.T) { host := newDefaultNamedHost(demo.RegistrationErrorHost, t) r := newDemoReconciler(host) tryReconcile(t, r, host, func(host *metalkubev1alpha1.BareMetalHost, result reconcile.Result) bool { t.Logf("Status: %q State: %q ErrorMessage: %q", host.OperationalStatus(), host.Status.Provisioning.State, host.Status.ErrorMessage, ) return host.HasError() }, ) } // TestDemoRegistering tests that a host with the right name reports // that it is being registered func TestDemoRegistering(t *testing.T)
// TestDemoInspecting tests that a host with the right name reports // that it is being inspected func TestDemoInspecting(t *testing.T) { host := newDefaultNamedHost(demo.InspectingHost, t) r := newDemoReconciler(host) tryReconcile(t, r, host, func(host *metalkubev1alpha1.BareMetalHost, result reconcile.Result) bool { t.Logf("Status: %q State: %q ErrorMessage: %q", host.OperationalStatus(), host.Status.Provisioning.State, host.Status.ErrorMessage, ) return host.Status.Provisioning.State == metalkubev1alpha1.StateInspecting }, ) } // TestDemoReady tests that a host with the right name reports // that it is ready to be provisioned func TestDemoReady(t *testing.T) { host := newDefaultNamedHost(demo.ReadyHost, t) r := newDemoReconciler(host) tryReconcile(t, r, host, func(host *metalkubev1alpha1.BareMetalHost, result reconcile.Result) bool { t.Logf("Status: %q State: %q ErrorMessage: %q", host.OperationalStatus(), host.Status.Provisioning.State, host.Status.ErrorMessage, ) return host.Status.Provisioning.State == metalkubev1alpha1.StateReady }, ) } // TestDemoProvisioning tests that a host with the right name reports // that it is being provisioned func TestDemoProvisioning(t *testing.T) { host := newDefaultNamedHost(demo.ProvisioningHost, t) host.Spec.Image = &metalkubev1alpha1.Image{ URL: "a-url", Checksum: "a-checksum", } host.Spec.Online = true r := newDemoReconciler(host) tryReconcile(t, r, host, func(host *metalkubev1alpha1.BareMetalHost, result reconcile.Result) bool { t.Logf("Status: %q State: %q ErrorMessage: %q", host.OperationalStatus(), host.Status.Provisioning.State, host.Status.ErrorMessage, ) return host.Status.Provisioning.State == metalkubev1alpha1.StateProvisioning }, ) } // TestDemoProvisioned tests that a host with the right name reports // that it has been provisioned func TestDemoProvisioned(t *testing.T) { host := newDefaultNamedHost(demo.ProvisionedHost, t) host.Spec.Image = &metalkubev1alpha1.Image{ URL: "a-url", Checksum: "a-checksum", } host.Spec.Online = true r := newDemoReconciler(host) tryReconcile(t, r, host, func(host *metalkubev1alpha1.BareMetalHost, result reconcile.Result) bool { t.Logf("Status: %q State: %q ErrorMessage: %q", host.OperationalStatus(), host.Status.Provisioning.State, host.Status.ErrorMessage, ) return host.Status.Provisioning.State == metalkubev1alpha1.StateProvisioned }, ) } // TestDemoValidationError tests that a host with the right name // reports that it had and error while being provisioned func TestDemoValidationError(t *testing.T) { host := newDefaultNamedHost(demo.ValidationErrorHost, t) host.Spec.Image = &metalkubev1alpha1.Image{ URL: "a-url", Checksum: "a-checksum", } host.Spec.Online = true r := newDemoReconciler(host) tryReconcile(t, r, host, func(host *metalkubev1alpha1.BareMetalHost, result reconcile.Result) bool { t.Logf("Status: %q State: %q ErrorMessage: %q", host.OperationalStatus(), host.Status.Provisioning.State, host.Status.ErrorMessage, ) return host.HasError() }, ) }
{ host := newDefaultNamedHost(demo.RegisteringHost, t) r := newDemoReconciler(host) tryReconcile(t, r, host, func(host *metalkubev1alpha1.BareMetalHost, result reconcile.Result) bool { t.Logf("Status: %q State: %q ErrorMessage: %q", host.OperationalStatus(), host.Status.Provisioning.State, host.Status.ErrorMessage, ) return host.Status.Provisioning.State == metalkubev1alpha1.StateRegistering }, ) }
example.py
# Copyright (c) 2016 by Matt Sewall. # All rights reserved. import math import csv import json import os import shutil from sys import argv from datetime import datetime from django.utils.encoding import smart_str, smart_unicode from operator import itemgetter from elo_classes import * from elo import * # Elos dictionaries contain athletes keyed to an elo value # Entries dictionaries contain athletes keyed to history of their results elos_boys = {} elos_girls = {} entries_boys = {} entries_girls = {} _DEFELO = 1500.0 def do_elo(data, meetName, meetDate, gender): if gender == "female": elos = elos_girls entries = entries_girls elif gender == "male": elos = elos_boys entries = entries_boys # Add players to competition and calculate elos meet = Meet() meet.competitors = [] for dat in data: name = dat[0] place = int(dat[1]) school = dat[2] ath = Athlete(name, school) if ath in elos: elo = float(elos.get(ath)) meet.addCompetitor(name, place, elo, school) else: # defaults to elo of 1500 on athletes first meet meet.addCompetitor(name, place, _DEFELO, school) calculateElo(meet.competitors) # Take results of competition and append data for runner in meet.competitors: ather = Athlete(runner.name, runner.school) elos[ather] = runner.elo if ather in entries: res_list = entries.get(ather) res_list.append([meetName, meetDate, runner.elo]) entries[ather] = res_list else: entries[ather] = [[meetName, meetDate, runner.elo]] def align_data(filename): filex = open(filename) sort = [] for json_string in filex: parsed = json.loads(json_string) results = parsed["results"] kill = False locs = parsed["meetLocation"] a_date = parsed["meetDate"] exact_date = datetime.strptime(a_date[0], "%A, %B %d, %Y") for loc in locs: if loc == u'Collegiate' or loc == u'MS': kill = True for result in results: if result.keys() == [u'maleResults'] or [u'femaleResults']: static = result.values() events = static[0] for event in events: data = [] data.append(exact_date) data.append(parsed['meetName']) if result.keys() == [u'maleResults']: data.append("male") elif result.keys() == [u'femaleResults']: data.append("female") places = [] details = event[u'eventDetails'] for detail in details: killx = False ath_detail_List = [] ath_detail_List.append( smart_str(detail[u'resultName'])) if detail[u'resultPlace'] == " " or \ detail[u'resultPlace'] == u' ': killx = True else: ath_detail_List.append(detail[u'resultPlace']) ath_detail_List.append( smart_str(detail[u'resultSchool'])) if killx is False: places.append(ath_detail_List) data.append(places) if kill is False: sort.append(data) sortx = sorted(sort, key=itemgetter(0)) return sortx def write_ath(entries): if entries == entries_boys: path = "./meets/boys" elif entries == entries_girls: path = "./meets/girls" if not os.path.exists("./meets/"): os.mkdir("./meets/") if not os.path.exists(path): os.mkdir(path + "/") for ath in entries: school_path = os.path.join(path, ath.school) ath_path = os.path.join(school_path, ath.name + ".csv") filename = "%s.csv" % ath.name with open((filename), "w") as fp: a = csv.writer(fp, delimiter=',') a.writerows(entries[ath]) if os.path.exists(school_path): shutil.move(filename, ath_path) else: os.mkdir(school_path) shutil.move(filename, ath_path) def
(elos, gender): if gender == "male": name = "athlete_elo_boys.csv" elif gender == "female": name = "athlete_elo_girls.csv" with open((name), "w") as fp: a = csv.writer(fp, delimiter=',') a.writerows(elos) def main(): # isolates every event and passes it into the do_elo function by gender events = align_data(argv[1]) count = 0 for event in events: # makes sure there are 4 values of (name, date, gender, and results) if len(event) == 4: print count count += 1 name = smart_str(event[1][0]) date = event[0] gender = event[2] do_elo(event[3], name, date, gender) # sorts the dictionaries by ascending elo sorted_boys = sorted(elos_boys.items(), key=itemgetter(1)) sorted_girls = sorted(elos_girls.items(), key=itemgetter(1)) write_elo(sorted_boys, "male") write_elo(sorted_girls, "female") write_ath(entries_girls) write_ath(entries_boys) if __name__ == '__main__': main()
write_elo
explicit-project-dependencies.js
"use strict"; Object.defineProperty(exports, "__esModule", { value: true }); exports.buildExplicitTypeScriptDependencies = void 0; const typescript_import_locator_1 = require("./typescript-import-locator"); const target_project_locator_1 = require("../../target-project-locator"); function
(ctx, builder) { const importLocator = new typescript_import_locator_1.TypeScriptImportLocator(); const targetProjectLocator = new target_project_locator_1.TargetProjectLocator(builder.graph.nodes); Object.keys(ctx.filesToProcess).forEach((source) => { Object.values(ctx.filesToProcess[source]).forEach((f) => { importLocator.fromFile(f.file, (importExpr, filePath, type) => { const target = targetProjectLocator.findProjectWithImport(importExpr, f.file, ctx.workspace.npmScope); if (target) { builder.addExplicitDependency(source, f.file, target); } }); }); }); } exports.buildExplicitTypeScriptDependencies = buildExplicitTypeScriptDependencies; //# sourceMappingURL=explicit-project-dependencies.js.map
buildExplicitTypeScriptDependencies
client.go
package main import( "fmt" "math" "net" "os" "time" ) func main(){ out := "" lastFlush := 0 sinceLast := 0 conn, _ := net.DialTCP("tcp", nil, &net.TCPAddr{net.ParseIP(os.Args[1]), 8080, ""}) conn.Write([]byte("GET / HTTP 1.1\r\n\r\n")) //synchronize var buff = make([]byte, 3000) i, err := conn.Read(buff) lastTime := time.Now() startTime := lastTime os.Stderr.Write(buff[:i]) for err = nil; err == nil; { i, err = conn.Read(buff) cTime := time.Now() os.Stderr.Write(buff[:i]) //Determine the number of time cycles passed zeros := int(cTime.Sub(lastTime) / 5 / time.Millisecond) for i := 0; i < zeros; i++{ out += "0" sinceLast++ if sinceLast == 16{ os.Stdout.Write(binconvert(demanchester(out[lastFlush:lastFlush + 16]))) lastFlush += 16 sinceLast = 0 } } if err == nil{ out += "1" sinceLast++ if sinceLast == 16{ os.Stdout.Write(binconvert(demanchester(out[lastFlush:lastFlush + 16]))) lastFlush += 16 sinceLast = 0 } } lastTime = cTime } fmt.Fprintf(os.Stderr, "Rate: %fbps\n", float64(lastFlush) / float64(time.Now().Sub(startTime)) * float64(time.Second)) //fmt.Println("\nThe message was: " + out) //fmt.Println("The message really was: " + string(binconvert(out))) } func demanchester(in string) (string){ ret := "" for b := 0; b < len(in); b += 2{ ret += string(in[b]) } return ret } func
(in string) ([]byte){ l := len(in) / 8 var ret = make([]byte, l) for i := 0; i < l; i++{ for j := 0; j < 8; j++{ if in[i * 8 + (7 - j)] == 0x31{ ret[i] += byte(math.Pow(2, float64(j))) } } } return ret }
binconvert
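The two helpers at the bottom of client.go are easy to misread, so here is a small, self-contained check of what they do: demanchester keeps every second symbol of the line-coded bitstring, and binconvert packs each run of 8 ASCII '0'/'1' characters into one byte, most-significant bit first. The two functions are copied from the file so the snippet runs on its own; the sample bitstring is made up for illustration, not taken from a real capture.

package main

import (
	"fmt"
	"math"
)

// Copied from client.go: keep every second symbol (index 0, 2, 4, ...).
func demanchester(in string) string {
	ret := ""
	for b := 0; b < len(in); b += 2 {
		ret += string(in[b])
	}
	return ret
}

// Copied from client.go: pack each group of 8 ASCII '0'/'1' characters
// into one byte, most-significant bit first (0x31 is '1').
func binconvert(in string) []byte {
	l := len(in) / 8
	ret := make([]byte, l)
	for i := 0; i < l; i++ {
		for j := 0; j < 8; j++ {
			if in[i*8+(7-j)] == 0x31 {
				ret[i] += byte(math.Pow(2, float64(j)))
			}
		}
	}
	return ret
}

func main() {
	// Made-up 16-symbol sample, two symbols per data bit.
	coded := "0110010101010110"
	bits := demanchester(coded)          // "01000001"
	fmt.Println(bits)
	fmt.Printf("%q\n", binconvert(bits)) // "A"
}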
grpc_server_test.go
// Copyright (c) 2018 The Jaeger Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package grpcserver import ( "context" "sync" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/zap" "go.uber.org/zap/zapcore" "go.uber.org/zap/zaptest/observer" "google.golang.org/grpc" "google.golang.org/grpc/test/bufconn" "github.com/jaegertracing/jaeger/cmd/collector/app" "github.com/jaegertracing/jaeger/model" "github.com/jaegertracing/jaeger/proto-gen/api_v2" "github.com/jaegertracing/jaeger/thrift-gen/sampling" ) // test wrong port number func TestFailToListen(t *testing.T) { l, _ := zap.NewDevelopment() handler := app.NewGRPCHandler(l, &mockSpanProcessor{}) server := grpc.NewServer() const invalidPort = -1 addr, err := StartGRPCCollector(invalidPort, server, handler, &mockSamplingStore{}, l, func(e error) { }) assert.Nil(t, addr) assert.EqualError(t, err, "Failed to listen on gRPC port: listen tcp: address -1: invalid port") } func
(t *testing.T) { lis := bufconn.Listen(0) lis.Close() core, logs := observer.New(zap.NewAtomicLevelAt(zapcore.ErrorLevel)) var wg sync.WaitGroup wg.Add(1) startServer(grpc.NewServer(), lis, zap.New(core), func(e error) { assert.Equal(t, 1, len(logs.All())) assert.Equal(t, "Could not launch gRPC service", logs.All()[0].Message) wg.Done() }) wg.Wait() } func TestSpanCollector(t *testing.T) { l, _ := zap.NewDevelopment() handler := app.NewGRPCHandler(l, &mockSpanProcessor{}) server := grpc.NewServer() addr, err := StartGRPCCollector(0, server, handler, &mockSamplingStore{}, l, func(e error) { }) require.NoError(t, err) conn, err := grpc.Dial(addr.String(), grpc.WithInsecure()) defer conn.Close() defer server.Stop() require.NoError(t, err) c := api_v2.NewCollectorServiceClient(conn) response, err := c.PostSpans(context.Background(), &api_v2.PostSpansRequest{}) require.NoError(t, err) require.NotNil(t, response) } type mockSamplingStore struct{} func (s mockSamplingStore) GetSamplingStrategy(serviceName string) (*sampling.SamplingStrategyResponse, error) { return nil, nil } type mockSpanProcessor struct { } func (p *mockSpanProcessor) ProcessSpans(spans []*model.Span, spanFormat string) ([]bool, error) { return []bool{}, nil }
TestFailServe
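TestFailServe above relies on bufconn, an in-memory net.Listener that lets gRPC tests run without binding a real port. The happy-path counterpart of that pattern looks roughly like the sketch below; it is generic boilerplate rather than code from this repository, and the listener size and the "bufnet" target name are arbitrary placeholders.

package main

import (
	"context"
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/test/bufconn"
)

func main() {
	// In-memory listener: nothing is bound on the host network.
	lis := bufconn.Listen(1024 * 1024)
	server := grpc.NewServer()

	// Serve in the background; a real test registers services first.
	go func() {
		if err := server.Serve(lis); err != nil {
			log.Printf("server stopped: %v", err)
		}
	}()

	// Dial through the buffer instead of TCP. The custom dialer ignores
	// the "bufnet" target and hands back an in-memory connection.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	conn, err := grpc.DialContext(ctx, "bufnet",
		grpc.WithContextDialer(func(ctx context.Context, _ string) (net.Conn, error) {
			return lis.Dial()
		}),
		grpc.WithInsecure(),
	)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	log.Printf("connection state: %v", conn.GetState())

	conn.Close()
	server.Stop()
}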
nested_item_main.rs
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // aux-build:nested_item.rs // pretty-expanded FIXME #23616 extern crate nested_item; pub fn main()
{ assert_eq!(2, nested_item::foo::<()>()); assert_eq!(2, nested_item::foo::<isize>()); }
dpt_time_test.py
"""Unit test for KNX time objects.""" import unittest from xknx.dpt import DPTTime, DPTWeekday from xknx.exceptions import ConversionError class TestDPTTime(unittest.TestCase): """Test class for KNX time objects.""" # # TEST NORMAL TIME # def test_from_knx(self): """Test parsing of DPTTime object from binary values. Example 1.""" self.assertEqual(DPTTime().from_knx((0x4D, 0x17, 0x2A)), {'weekday': DPTWeekday.TUESDAY, 'hours': 13, 'minutes': 23, 'seconds': 42}) def test_to_knx(self): """Testing KNX/Byte representation of DPTTime object.""" raw = DPTTime().to_knx( {'weekday': DPTWeekday.TUESDAY, 'hours': 13, 'minutes': 23, 'seconds': 42}) self.assertEqual(raw, (0x4D, 0x17, 0x2A)) # # TEST MAXIMUM TIME # def test_to_knx_max(self): """Testing KNX/Byte representation of DPTTime object. Maximum values.""" raw = DPTTime().to_knx( {'weekday': DPTWeekday.SUNDAY, 'hours': 23, 'minutes': 59, 'seconds': 59}) self.assertEqual(raw, (0xF7, 0x3b, 0x3b)) def test_from_knx_max(self): """Test parsing of DPTTime object from binary values. Example 2.""" self.assertEqual(DPTTime().from_knx((0xF7, 0x3b, 0x3b)), {'weekday': DPTWeekday.SUNDAY, 'hours': 23, 'minutes': 59, 'seconds': 59}) # # TEST MINIMUM TIME # def test_to_knx_min(self): """Testing KNX/Byte representation of DPTTime object. Minimum values.""" raw = DPTTime().to_knx( {'weekday': DPTWeekday.NONE, 'hours': 0, 'minutes': 0, 'seconds': 0}) self.assertEqual(raw, (0x0, 0x0, 0x0)) def test_from_knx_min(self): """Test parsing of DPTTime object from binary values. Example 3.""" self.assertEqual(DPTTime().from_knx((0x0, 0x0, 0x0)), {'weekday': DPTWeekday.NONE, 'hours': 0, 'minutes': 0, 'seconds': 0}) # # TEST INITIALIZATION # def test_to_knx_default(self):
def test_from_knx_wrong_size(self): """Test parsing of DPTTime object from wrong binary values (wrong size).""" with self.assertRaises(ConversionError): DPTTime().from_knx((0xF8, 0x23)) def test_from_knx_wrong_bytes(self): """Test parsing of DPTTime object from wrong binary values (wrong bytes).""" with self.assertRaises(ConversionError): # third parameter exceeds limit DPTTime().from_knx((0xF7, 0x3b, 0x3c)) def test_from_knx_wrong_type(self): """Test parsing of DPTTime object from wrong binary values (wrong type).""" with self.assertRaises(ConversionError): DPTTime().from_knx((0xF8, "0x23")) def test_to_knx_wrong_parameter(self): """Test parsing of DPTTime object from wrong string value.""" with self.assertRaises(ConversionError): DPTTime().to_knx("fnord") def test_to_knx_wrong_seconds(self): """Test parsing of DPTTime object from wrong seconds value.""" with self.assertRaises(ConversionError): DPTTime().to_knx({ 'hours': 12, 'minutes': 42, 'seconds': 61 }) def test_to_knx_wrong_minutes(self): """Test parsing of DPTTime object from wrong minutes value.""" with self.assertRaises(ConversionError): DPTTime().to_knx({ 'hours': 12, 'minutes': 61, 'seconds': 53 }) def test_to_knx_wrong_hours(self): """Test parsing of DPTTime object from wrong hours value.""" with self.assertRaises(ConversionError): DPTTime().to_knx({ 'hours': 24, 'minutes': 42, 'seconds': 53 }) def test_test_range_wrong_weekday(self): """Test range testing with wrong weekday (Can't be tested with normal from_/to_knx).""" # pylint: disable=protected-access self.assertFalse(DPTTime._test_range(8, 0, 0, 0))
"""Testing default initialization of DPTTime object.""" self.assertEqual(DPTTime().to_knx({}), (0x0, 0x0, 0x0))
testcases.py
# -*- coding: utf-8 -*- from cms.models import Page from cms.test_utils.util.context_managers import (UserLoginContext, SettingsOverride) from django.conf import settings from django.contrib.auth.models import AnonymousUser from django.core.exceptions import ObjectDoesNotExist from django.core.urlresolvers import reverse from django.template.context import Context from django.test import testcases from django.test.client import RequestFactory from django.utils.translation import activate from menus.menu_pool import menu_pool from urlparse import urljoin import sys import urllib import warnings from cms.utils.permissions import set_current_user from cms.compat import User URL_CMS_PAGE = "/en/admin/cms/page/" URL_CMS_PAGE_ADD = urljoin(URL_CMS_PAGE, "add/") URL_CMS_PAGE_CHANGE = urljoin(URL_CMS_PAGE, "%d/") URL_CMS_PAGE_CHANGE_LANGUAGE = URL_CMS_PAGE_CHANGE + "?language=%s" URL_CMS_PAGE_DELETE = urljoin(URL_CMS_PAGE_CHANGE, "delete/") URL_CMS_PLUGIN_ADD = urljoin(URL_CMS_PAGE_CHANGE, "add-plugin/") URL_CMS_PLUGIN_EDIT = urljoin(URL_CMS_PAGE_CHANGE, "edit-plugin/") URL_CMS_PLUGIN_MOVE = urljoin(URL_CMS_PAGE_CHANGE, "move-plugin/") URL_CMS_PLUGIN_REMOVE = urljoin(URL_CMS_PAGE_CHANGE, "remove-plugin/") URL_CMS_TRANSLATION_DELETE = urljoin(URL_CMS_PAGE_CHANGE, "delete-translation/") URL_CMS_PAGE_HISTORY = urljoin(URL_CMS_PAGE_CHANGE, "history/%d/") URL_CMS_PLUGIN_HISTORY_EDIT = urljoin(URL_CMS_PAGE_HISTORY, "edit-plugin/") class _Warning(object): def __init__(self, message, category, filename, lineno): self.message = message self.category = category self.filename = filename self.lineno = lineno def _collectWarnings(observeWarning, f, *args, **kwargs): def showWarning(message, category, filename, lineno, file=None, line=None): assert isinstance(message, Warning) observeWarning(_Warning( message.args[0], category, filename, lineno)) # Disable the per-module cache for every module otherwise if the warning # which the caller is expecting us to collect was already emitted it won't # be re-emitted by the call to f which happens below. for v in sys.modules.itervalues(): if v is not None: try: v.__warningregistry__ = None except: # Don't specify a particular exception type to handle in case # some wacky object raises some wacky exception in response to # the setattr attempt. 
pass origFilters = warnings.filters[:] origShow = warnings.showwarning warnings.simplefilter('always') try: warnings.showwarning = showWarning result = f(*args, **kwargs) finally: warnings.filters[:] = origFilters warnings.showwarning = origShow return result class CMSTestCase(testcases.TestCase): counter = 1 def _fixture_setup(self): super(CMSTestCase, self)._fixture_setup() self.create_fixtures() activate("en") def create_fixtures(self): pass def _post_teardown(self): # Needed to clean the menu keys cache, see menu.menu_pool.clear() menu_pool.clear() super(CMSTestCase, self)._post_teardown() set_current_user(None) def login_user_context(self, user): return UserLoginContext(self, user) def get_superuser(self): try: admin = User.objects.get(username="admin") except User.DoesNotExist: admin = User(username="admin", is_staff=True, is_active=True, is_superuser=True) admin.set_password("admin") admin.save() return admin def get_staff_user_with_no_permissions(self): """ Used in security tests """ staff = User(username="staff", is_staff=True, is_active=True) staff.set_password("staff") staff.save() return staff def get_new_page_data(self, parent_id=''): page_data = { 'title': 'test page %d' % self.counter, 'slug': 'test-page-%d' % self.counter, 'language': settings.LANGUAGES[0][0], 'template': 'nav_playground.html', 'parent': parent_id, 'site': 1, } # required only if user haves can_change_permission page_data['pagepermission_set-TOTAL_FORMS'] = 0 page_data['pagepermission_set-INITIAL_FORMS'] = 0 page_data['pagepermission_set-MAX_NUM_FORMS'] = 0 page_data['pagepermission_set-2-TOTAL_FORMS'] = 0 page_data['pagepermission_set-2-INITIAL_FORMS'] = 0 page_data['pagepermission_set-2-MAX_NUM_FORMS'] = 0 self.counter = self.counter + 1 return page_data def get_new_page_data_dbfields(self, parent=None, site=None, language=None, template='nav_playground.html',): page_data = { 'title': 'test page %d' % self.counter, 'slug': 'test-page-%d' % self.counter, 'language': settings.LANGUAGES[0][0] if not language else language, 'template': template, 'parent': parent if parent else None, 'site': site if site else Site.objects.get_current(), } self.counter = self.counter + 1 return page_data def get_pagedata_from_dbfields(self, page_data): """Converts data created by get_new_page_data_dbfields to data created from get_new_page_data so you can switch between test cases in api.create_page and client.post""" page_data['site'] = page_data['site'].id page_data['parent'] = page_data['parent'].id if page_data['parent'] else '' # required only if user haves can_change_permission page_data['pagepermission_set-TOTAL_FORMS'] = 0 page_data['pagepermission_set-INITIAL_FORMS'] = 0 page_data['pagepermission_set-MAX_NUM_FORMS'] = 0 page_data['pagepermission_set-2-TOTAL_FORMS'] = 0 page_data['pagepermission_set-2-INITIAL_FORMS'] = 0 page_data['pagepermission_set-2-MAX_NUM_FORMS'] = 0 return page_data def print_page_structure(self, qs): """Just a helper to see the page struct. 
""" for page in qs.order_by('tree_id', 'lft'): ident = " " * page.level print "%s%s (%s), lft: %s, rght: %s, tree_id: %s" % (ident, page, page.pk, page.lft, page.rght, page.tree_id) def print_node_structure(self, nodes, *extra): def _rec(nodes, level=0): ident = level * ' ' for node in nodes: raw_attrs = [(bit, getattr(node, bit, node.attr.get(bit, "unknown"))) for bit in extra] attrs = ', '.join(['%s: %r' % data for data in raw_attrs]) print "%s%s: %s" % (ident, node.title, attrs) _rec(node.children, level + 1) _rec(nodes) def assertObjectExist(self, qs, **filter): try: return qs.get(**filter) except ObjectDoesNotExist: pass raise self.failureException, "ObjectDoesNotExist raised for filter %s" % filter def assertObjectDoesNotExist(self, qs, **filter): try: qs.get(**filter) except ObjectDoesNotExist: return raise self.failureException, "ObjectDoesNotExist not raised for filter %s" % filter def copy_page(self, page, target_page): from cms.utils.page import get_available_slug data = { 'position': 'last-child', 'target': target_page.pk, 'site': 1, 'copy_permissions': 'on', 'copy_moderation': 'on', } response = self.client.post(URL_CMS_PAGE + "%d/copy-page/" % page.pk, data) self.assertEquals(response.status_code, 200) # Altered to reflect the new django-js jsonified response messages self.assertEquals(response.content, '{"status": 200, "content": "ok"}') title = page.title_set.all()[0] copied_slug = get_available_slug(title) copied_page = self.assertObjectExist(Page.objects, title_set__slug=copied_slug, parent=target_page) return copied_page def move_page(self, page, target_page, position="first-child"): page.move_page(target_page, position) return self.reload_page(page) def reload_page(self, page): """ Returns a fresh instance of the page from the database """ return self.reload(page) def reload(self, obj): return obj.__class__.objects.get(pk=obj.pk) def get_pages_root(self): return urllib.unquote(reverse("pages-root")) def get_context(self, path=None): if not path: path = self.get_pages_root() context = {} request = self.get_request(path) context['request'] = request return Context(context) def get_request(self, path=None, language=None, post_data=None, enforce_csrf_checks=False): factory = RequestFactory() if not path: path = self.get_pages_root() if not language: language = settings.LANGUAGES[0][0] if post_data: request = factory.post(path, post_data) else: request = factory.get(path) request.session = self.client.session request.user = getattr(self, 'user', AnonymousUser()) request.LANGUAGE_CODE = language request._dont_enforce_csrf_checks = not enforce_csrf_checks class MockStorage(object): def __len__(self): return 0 def
(self): return iter([]) def add(self, level, message, extra_tags=''): pass def update(self, response): pass request._messages = MockStorage() return request def check_published_page_attributes(self, page): public_page = page.publisher_public if page.parent: self.assertEqual(page.parent_id, public_page.parent.publisher_draft.id) self.assertEqual(page.level, public_page.level) # TODO: add check for siblings draft_siblings = list(page.get_siblings(True).filter( publisher_is_draft=True ).order_by('tree_id', 'parent', 'lft')) public_siblings = list(public_page.get_siblings(True).filter( publisher_is_draft=False ).order_by('tree_id', 'parent', 'lft')) skip = 0 for i, sibling in enumerate(draft_siblings): if not sibling.publisher_public_id: skip += 1 continue self.assertEqual(sibling.id, public_siblings[i - skip].publisher_draft.id) def failUnlessWarns(self, category, message, f, *args, **kwargs): warningsShown = [] result = _collectWarnings(warningsShown.append, f, *args, **kwargs) if not warningsShown: self.fail("No warnings emitted") first = warningsShown[0] for other in warningsShown[1:]: if ((other.message, other.category) != (first.message, first.category)): self.fail("Can't handle different warnings") self.assertEqual(first.message, message) self.assertTrue(first.category is category) return result assertWarns = failUnlessWarns class SettingsOverrideTestCase(CMSTestCase): settings_overrides = {} def _pre_setup(self): self._enter_settings_override() super(SettingsOverrideTestCase, self)._pre_setup() def _enter_settings_override(self): self._settings_ctx_manager = SettingsOverride(**self.settings_overrides) self._settings_ctx_manager.__enter__() def _post_teardown(self): super(SettingsOverrideTestCase, self)._post_teardown() self._exit_settings_override() def _exit_settings_override(self): self._settings_ctx_manager.__exit__(None, None, None)
__iter__
lightmodules_test.go
// Licensed to Elasticsearch B.V. under one or more contributor // license agreements. See the NOTICE file distributed with // this work for additional information regarding copyright // ownership. Elasticsearch B.V. licenses this file to you under // the Apache License, Version 2.0 (the "License"); you may // not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. //go:build !integration // +build !integration package mb import ( "net/url" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/logp" _ "github.com/elastic/beats/v7/libbeat/processors/add_id" ) // TestLightModulesAsModuleSource checks that registry correctly lists // metricsets when used with light modules func TestLightModulesAsModuleSource(t *testing.T) { logp.TestingSetup() type testMetricSet struct { name string module string isDefault bool hostParser HostParser } cases := map[string]struct { registered []testMetricSet expectedMetricSets map[string][]string expectedDefaultMetricSets map[string][]string }{ "no registered modules": { expectedMetricSets: map[string][]string{ "service": []string{"metricset", "nondefault"}, "broken": []string{}, "empty": []string{}, }, expectedDefaultMetricSets: map[string][]string{ "service": []string{"metricset"}, "broken": []string{}, "empty": []string{}, }, }, "same module registered (mixed modules case)": { registered: []testMetricSet{ {name: "other", module: "service"}, }, expectedMetricSets: map[string][]string{ "service": []string{"metricset", "nondefault", "other"}, }, expectedDefaultMetricSets: map[string][]string{ "service": []string{"metricset"}, }, }, "some metricsets registered": { registered: []testMetricSet{ {name: "other", module: "service"}, {name: "metricset", module: "something", isDefault: true}, {name: "metricset", module: "someotherthing"}, }, expectedMetricSets: map[string][]string{ "service": []string{"metricset", "nondefault", "other"}, "something": []string{"metricset"}, "someotherthing": []string{"metricset"}, }, expectedDefaultMetricSets: map[string][]string{ "service": []string{"metricset"}, "something": []string{"metricset"}, "someotherthing": []string{}, }, }, } fakeMetricSetFactory := func(base BaseMetricSet) (MetricSet, error) { return &base, nil } newRegistry := func(metricSets []testMetricSet) *Register { r := NewRegister() for _, m := range metricSets { opts := []MetricSetOption{} if m.isDefault { opts = append(opts, DefaultMetricSet()) } if m.hostParser != nil { opts = append(opts, WithHostParser(m.hostParser)) } r.MustAddMetricSet(m.module, m.name, fakeMetricSetFactory, opts...) 
} r.SetSecondarySource(NewLightModulesSource("testdata/lightmodules")) return r } for title, c := range cases { t.Run(title, func(t *testing.T) { r := newRegistry(c.registered) // Check metricsets for module, metricSets := range c.expectedMetricSets { t.Run("metricsets for "+module, func(t *testing.T) { assert.ElementsMatch(t, metricSets, r.MetricSets(module)) }) } // Check default metricsets for module, expected := range c.expectedDefaultMetricSets { t.Run("default metricsets for "+module, func(t *testing.T) { found, err := r.DefaultMetricSets(module) if len(expected) > 0 { assert.NoError(t, err) assert.ElementsMatch(t, expected, found) } else { assert.Error(t, err, "error expected when there are no default metricsets") } }) } }) } } func TestLoadModule(t *testing.T)
func TestNewModuleFromConfig(t *testing.T) { logp.TestingSetup() cases := map[string]struct { config common.MapStr err bool expectedOption string expectedQuery QueryParams expectedPeriod time.Duration }{ "normal module": { config: common.MapStr{"module": "foo", "metricsets": []string{"bar"}}, expectedOption: "default", expectedQuery: nil, }, "light module": { config: common.MapStr{"module": "service", "metricsets": []string{"metricset"}}, expectedOption: "test", expectedQuery: nil, }, "light module default metricset": { config: common.MapStr{"module": "service"}, expectedOption: "test", expectedQuery: nil, }, "light module override option": { config: common.MapStr{"module": "service", "option": "overriden"}, expectedOption: "overriden", expectedQuery: nil, }, "light module with query": { config: common.MapStr{"module": "service", "query": common.MapStr{"param": "foo"}}, expectedOption: "test", expectedQuery: QueryParams{"param": "foo"}, }, "light module with custom period": { config: common.MapStr{"module": "service", "period": "42s"}, expectedOption: "test", expectedPeriod: 42 * time.Second, expectedQuery: nil, }, "light module is broken": { config: common.MapStr{"module": "broken"}, err: true, }, "light metric set doesn't exist": { config: common.MapStr{"module": "service", "metricsets": []string{"notexists"}}, err: true, }, "disabled light module": { config: common.MapStr{"module": "service", "enabled": false}, err: true, }, "mixed module with standard and light metricsets": { config: common.MapStr{"module": "mixed", "metricsets": []string{"standard", "light"}}, expectedOption: "default", expectedQuery: nil, }, "mixed module with unregistered and light metricsets": { config: common.MapStr{"module": "mixedbroken", "metricsets": []string{"unregistered", "light"}}, err: true, }, } r := NewRegister() r.MustAddMetricSet("foo", "bar", newMetricSetWithOption) r.MustAddMetricSet("foo", "light", newMetricSetWithOption) r.MustAddMetricSet("mixed", "standard", newMetricSetWithOption) r.SetSecondarySource(NewLightModulesSource("testdata/lightmodules")) for title, c := range cases { t.Run(title, func(t *testing.T) { config, err := common.NewConfigFrom(c.config) require.NoError(t, err) module, metricSets, err := NewModule(config, r) if c.err { assert.Error(t, err) return } require.NoError(t, err) assert.Equal(t, c.config["module"].(string), module.Config().Module) if metricSetNames, ok := c.config["metricsets"].([]string); ok { assert.ElementsMatch(t, metricSetNames, module.Config().MetricSets) } assert.NotEmpty(t, metricSets) assert.NoError(t, err) for _, ms := range metricSets { t.Run(ms.Name(), func(t *testing.T) { ms, ok := ms.(*metricSetWithOption) require.True(t, ok) assert.Equal(t, c.expectedOption, ms.Option) assert.Equal(t, c.expectedQuery, ms.Module().Config().Query) expectedPeriod := c.expectedPeriod if expectedPeriod == 0 { expectedPeriod = DefaultModuleConfig().Period } assert.Equal(t, expectedPeriod, ms.Module().Config().Period) }) } }) } } func TestLightMetricSet_VerifyHostDataURI(t *testing.T) { const hostEndpoint = "ceph-restful:8003" const sampleHttpsEndpoint = "https://" + hostEndpoint r := NewRegister() r.MustAddMetricSet("http", "json", newMetricSetWithOption, WithHostParser(func(module Module, host string) (HostData, error) { u, err := url.Parse(host) if err != nil { return HostData{}, err } return HostData{ Host: u.Host, URI: host, }, nil })) r.SetSecondarySource(NewLightModulesSource("testdata/lightmodules")) config, err := common.NewConfigFrom( common.MapStr{ "module": 
"httpextended", "metricsets": []string{"extends"}, "hosts": []string{sampleHttpsEndpoint}, }) require.NoError(t, err) _, metricSets, err := NewModule(config, r) require.NoError(t, err) require.Len(t, metricSets, 1) assert.Equal(t, hostEndpoint, metricSets[0].Host()) assert.Equal(t, sampleHttpsEndpoint, metricSets[0].HostData().URI) } func TestLightMetricSet_WithoutHostParser(t *testing.T) { const sampleHttpsEndpoint = "https://ceph-restful:8003" r := NewRegister() r.MustAddMetricSet("http", "json", newMetricSetWithOption) r.SetSecondarySource(NewLightModulesSource("testdata/lightmodules")) config, err := common.NewConfigFrom( common.MapStr{ "module": "httpextended", "metricsets": []string{"extends"}, "hosts": []string{sampleHttpsEndpoint}, }) require.NoError(t, err) _, metricSets, err := NewModule(config, r) require.NoError(t, err) require.Len(t, metricSets, 1) assert.Equal(t, sampleHttpsEndpoint, metricSets[0].Host()) assert.Equal(t, sampleHttpsEndpoint, metricSets[0].HostData().URI) } func TestLightMetricSet_VerifyHostDataURI_NonParsableHost(t *testing.T) { const ( postgresHost = "host1:5432" postgresEndpoint = "postgres://user1:pass@host1:5432?connect_timeout=2" postgresParsed = "connect_timeout=3 host=host1 password=pass port=5432 user=user1" ) r := NewRegister() r.MustAddMetricSet("http", "json", newMetricSetWithOption, WithHostParser(func(module Module, host string) (HostData, error) { return HostData{ Host: postgresHost, URI: postgresParsed, }, nil })) r.SetSecondarySource(NewLightModulesSource("testdata/lightmodules")) config, err := common.NewConfigFrom( common.MapStr{ "module": "httpextended", "metricsets": []string{"extends"}, "hosts": []string{postgresEndpoint}, }) require.NoError(t, err) _, metricSets, err := NewModule(config, r) require.NoError(t, err) require.Len(t, metricSets, 1) assert.Equal(t, postgresHost, metricSets[0].Host()) assert.Equal(t, postgresParsed, metricSets[0].HostData().URI) } func TestNewModulesCallModuleFactory(t *testing.T) { logp.TestingSetup() r := NewRegister() r.MustAddMetricSet("foo", "bar", newMetricSetWithOption) r.SetSecondarySource(NewLightModulesSource("testdata/lightmodules")) called := false r.AddModule("foo", func(base BaseModule) (Module, error) { called = true return DefaultModuleFactory(base) }) config, err := common.NewConfigFrom(common.MapStr{"module": "service"}) require.NoError(t, err) _, _, err = NewModule(config, r) assert.NoError(t, err) assert.True(t, called, "module factory must be called if registered") } func TestProcessorsForMetricSet_UnknownModule(t *testing.T) { r := NewRegister() source := NewLightModulesSource("testdata/lightmodules") procs, err := source.ProcessorsForMetricSet(r, "nonexisting", "fake") require.Error(t, err) require.Nil(t, procs) } func TestProcessorsForMetricSet_UnknownMetricSet(t *testing.T) { r := NewRegister() source := NewLightModulesSource("testdata/lightmodules") procs, err := source.ProcessorsForMetricSet(r, "unpack", "nonexisting") require.Error(t, err) require.Nil(t, procs) } func TestProcessorsForMetricSet_ProcessorsRead(t *testing.T) { r := NewRegister() source := NewLightModulesSource("testdata/lightmodules") procs, err := source.ProcessorsForMetricSet(r, "unpack", "withprocessors") require.NoError(t, err) require.NotNil(t, procs) require.Len(t, procs.List, 1) } func TestProcessorsForMetricSet_ListModules(t *testing.T) { source := NewLightModulesSource("testdata/lightmodules") modules, err := source.Modules() require.NoError(t, err) // Check that regular file in directory is not listed as 
module require.FileExists(t, "testdata/lightmodules/regular_file") assert.NotContains(t, modules, "regular_file") expectedModules := []string{ "broken", "httpextended", "mixed", "mixedbroken", "service", "unpack", } assert.ElementsMatch(t, expectedModules, modules, "Modules found: %v", modules) } type metricSetWithOption struct { BaseMetricSet Option string } func newMetricSetWithOption(base BaseMetricSet) (MetricSet, error) { config := struct { Option string `config:"option"` }{ Option: "default", } err := base.Module().UnpackConfig(&config) if err != nil { return nil, err } return &metricSetWithOption{ BaseMetricSet: base, Option: config.Option, }, nil } func (*metricSetWithOption) Fetch(ReporterV2) error { return nil }
{ logp.TestingSetup() cases := []struct { name string exists bool err bool }{ { name: "service", exists: true, err: false, }, { name: "broken", exists: true, err: true, }, { name: "empty", exists: false, err: false, }, { name: "notexists", exists: false, err: false, }, } for _, c := range cases { register := NewRegister() r := NewLightModulesSource("testdata/lightmodules") t.Run(c.name, func(t *testing.T) { _, err := r.loadModule(register, c.name) if c.err { assert.Error(t, err) } assert.Equal(t, c.exists, r.HasModule(c.name)) }) } }
hfxoctrl1.rs
#[doc = "Reader of register HFXOCTRL1"] pub type R = crate::R<u32, super::HFXOCTRL1>; #[doc = "Writer for register HFXOCTRL1"] pub type W = crate::W<u32, super::HFXOCTRL1>; #[doc = "Register HFXOCTRL1 `reset()`'s with value 0x2000"] impl crate::ResetValue for super::HFXOCTRL1 { type Type = u32; #[inline(always)] fn reset_value() -> Self::Type { 0x2000 } } #[doc = "Sets the Amplitude Detection Level (mV)\n\nValue on reset: 2"] #[derive(Clone, Copy, Debug, PartialEq)] #[repr(u8)] pub enum PEAKDETTHR_A { #[doc = "0: 50mV amplitude detection level"] THR0 = 0, #[doc = "1: 75mV amplitude detection level"] THR1 = 1, #[doc = "2: 115mV amplitude detection level"] THR2 = 2, #[doc = "3: 160mV amplitude detection level"] THR3 = 3, #[doc = "4: 220mV amplitude detection level"] THR4 = 4, #[doc = "5: 260mV amplitude detection level"] THR5 = 5, #[doc = "6: 320mV amplitude detection level"] THR6 = 6, #[doc = "7: Same as THR6"] THR7 = 7, } impl From<PEAKDETTHR_A> for u8 { #[inline(always)] fn from(variant: PEAKDETTHR_A) -> Self { variant as _ } } #[doc = "Reader of field `PEAKDETTHR`"] pub type PEAKDETTHR_R = crate::R<u8, PEAKDETTHR_A>; impl PEAKDETTHR_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> PEAKDETTHR_A { match self.bits { 0 => PEAKDETTHR_A::THR0, 1 => PEAKDETTHR_A::THR1, 2 => PEAKDETTHR_A::THR2, 3 => PEAKDETTHR_A::THR3, 4 => PEAKDETTHR_A::THR4, 5 => PEAKDETTHR_A::THR5, 6 => PEAKDETTHR_A::THR6, 7 => PEAKDETTHR_A::THR7, _ => unreachable!(), } } #[doc = "Checks if the value of the field is `THR0`"] #[inline(always)] pub fn is_thr0(&self) -> bool { *self == PEAKDETTHR_A::THR0 } #[doc = "Checks if the value of the field is `THR1`"] #[inline(always)] pub fn
(&self) -> bool { *self == PEAKDETTHR_A::THR1 } #[doc = "Checks if the value of the field is `THR2`"] #[inline(always)] pub fn is_thr2(&self) -> bool { *self == PEAKDETTHR_A::THR2 } #[doc = "Checks if the value of the field is `THR3`"] #[inline(always)] pub fn is_thr3(&self) -> bool { *self == PEAKDETTHR_A::THR3 } #[doc = "Checks if the value of the field is `THR4`"] #[inline(always)] pub fn is_thr4(&self) -> bool { *self == PEAKDETTHR_A::THR4 } #[doc = "Checks if the value of the field is `THR5`"] #[inline(always)] pub fn is_thr5(&self) -> bool { *self == PEAKDETTHR_A::THR5 } #[doc = "Checks if the value of the field is `THR6`"] #[inline(always)] pub fn is_thr6(&self) -> bool { *self == PEAKDETTHR_A::THR6 } #[doc = "Checks if the value of the field is `THR7`"] #[inline(always)] pub fn is_thr7(&self) -> bool { *self == PEAKDETTHR_A::THR7 } } #[doc = "Write proxy for field `PEAKDETTHR`"] pub struct PEAKDETTHR_W<'a> { w: &'a mut W, } impl<'a> PEAKDETTHR_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: PEAKDETTHR_A) -> &'a mut W { { self.bits(variant.into()) } } #[doc = "50mV amplitude detection level"] #[inline(always)] pub fn thr0(self) -> &'a mut W { self.variant(PEAKDETTHR_A::THR0) } #[doc = "75mV amplitude detection level"] #[inline(always)] pub fn thr1(self) -> &'a mut W { self.variant(PEAKDETTHR_A::THR1) } #[doc = "115mV amplitude detection level"] #[inline(always)] pub fn thr2(self) -> &'a mut W { self.variant(PEAKDETTHR_A::THR2) } #[doc = "160mV amplitude detection level"] #[inline(always)] pub fn thr3(self) -> &'a mut W { self.variant(PEAKDETTHR_A::THR3) } #[doc = "220mV amplitude detection level"] #[inline(always)] pub fn thr4(self) -> &'a mut W { self.variant(PEAKDETTHR_A::THR4) } #[doc = "260mV amplitude detection level"] #[inline(always)] pub fn thr5(self) -> &'a mut W { self.variant(PEAKDETTHR_A::THR5) } #[doc = "320mV amplitude detection level"] #[inline(always)] pub fn thr6(self) -> &'a mut W { self.variant(PEAKDETTHR_A::THR6) } #[doc = "Same as THR6"] #[inline(always)] pub fn thr7(self) -> &'a mut W { self.variant(PEAKDETTHR_A::THR7) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x07 << 12)) | (((value as u32) & 0x07) << 12); self.w } } impl R { #[doc = "Bits 12:14 - Sets the Amplitude Detection Level (mV)"] #[inline(always)] pub fn peakdetthr(&self) -> PEAKDETTHR_R { PEAKDETTHR_R::new(((self.bits >> 12) & 0x07) as u8) } } impl W { #[doc = "Bits 12:14 - Sets the Amplitude Detection Level (mV)"] #[inline(always)] pub fn peakdetthr(&mut self) -> PEAKDETTHR_W { PEAKDETTHR_W { w: self } } }
is_thr1
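The reset value 0x2000 and the "Value on reset: 2" note for PEAKDETTHR are consistent: the field occupies bits 12..14, so the shift-and-mask used by the generated reader and writer can be checked by hand. A quick arithmetic sketch (not part of the crate):

RESET = 0x2000
PEAKDETTHR_SHIFT = 12
PEAKDETTHR_MASK = 0x07

# Mirrors PEAKDETTHR_R::new(((self.bits >> 12) & 0x07) as u8)
field = (RESET >> PEAKDETTHR_SHIFT) & PEAKDETTHR_MASK
assert field == 2  # THR2, i.e. the 115 mV amplitude detection level

# Writing THR4 back into a register value, mirroring PEAKDETTHR_W::bits()
new_bits = (RESET & ~(PEAKDETTHR_MASK << PEAKDETTHR_SHIFT)) | (4 << PEAKDETTHR_SHIFT)
assert (new_bits >> PEAKDETTHR_SHIFT) & PEAKDETTHR_MASK == 4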
_404.tsx
import React from 'react';
import Box from '@material-ui/core/Box';

const _404 = (props: any) => {
  const { setNavState, location } = props;
  const { pathname } = location;

  React.useEffect(() => {
    setNavState(pathname);
    window.scrollTo(0, 0);
    return () => {
      setNavState('/');
    };
  }, [pathname, setNavState]);

  return (
    <Box className='fade-in d-flex pt-4 justify-content-center container' height='100vh'>
      <Box className='align-self-center text-center'>
        <picture>
          <source srcSet='/images/logos/logo-128.webp' type='image/webp' />
          <img
            src='/images/logos/logo-128.png'
            alt={
              'Vee-Tek Group' +
              '/images/logos/logo-128.png'.split('/').slice(-1)[0]
            }
          />
        </picture>
        <h1>404 Page not found!</h1>
        <h4>Sorry, the page you are looking for was not found!</h4>
      </Box>
    </Box>
  );
};

export default _404;
className='rounded-circle logo-img mb-4'
dialogue.ts
import { Turn } from './turn'; import { DialogueInvalidError } from './dialogue_invalid_error'; import * as jsYaml from 'js-yaml'; import * as fs from 'fs'; import * as path from 'path'; export class
{ title: string; turns: Turn[]; constructor(filePath: string, preamble?: any[]) { let dialogueDoc; try { dialogueDoc = jsYaml.safeLoad(fs.readFileSync(filePath, 'utf8')); if (!dialogueDoc) { throw new DialogueInvalidError(`Not a valid yaml: ${filePath}`); } } catch (e) { if (e instanceof jsYaml.YAMLException) { throw new DialogueInvalidError(`File is not valid YAML: ${e.message}`); } else { throw new DialogueInvalidError(e.message); } } this.title = dialogueDoc.Title ? dialogueDoc.Title : path.basename(filePath, path.extname(filePath)); if (!dialogueDoc.Dialogue) { throw new DialogueInvalidError('No dialogue found'); } if (!(dialogueDoc.Dialogue instanceof Array)) { throw new DialogueInvalidError( `Dialogue lines must start with dashes: ${dialogueDoc.Dialogue}`); } const turnData = preamble ? preamble : []; this.turns = Turn.createTurns(turnData.concat(dialogueDoc.Dialogue)); } }
Dialogue
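The constructor expects a YAML document with an optional Title key and a Dialogue list. A rough Python equivalent of that validation, assuming PyYAML as a stand-in for js-yaml; the function name and error type are illustrative only:

import os
import yaml  # PyYAML, assumed substitute for js-yaml

def load_dialogue(file_path, preamble=None):
    with open(file_path, encoding="utf-8") as f:
        doc = yaml.safe_load(f)
    if not doc:
        raise ValueError("Not a valid yaml: %s" % file_path)
    # Fall back to the file name when no Title key is present.
    title = doc.get("Title") or os.path.splitext(os.path.basename(file_path))[0]
    dialogue = doc.get("Dialogue")
    if dialogue is None:
        raise ValueError("No dialogue found")
    if not isinstance(dialogue, list):
        raise ValueError("Dialogue lines must start with dashes: %s" % dialogue)
    turns = (preamble or []) + dialogue
    return title, turns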
helpers.js
/*! * Copyright 2016 Google Inc. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ 'use strict'; var path = require('path'); var uniq = require('array-uniq'); var globby = require('globby'); var spawn = require('child_process').spawnSync; require('shelljs/global'); /** * The pull request number. * * @alias ci.PR_NUMBER; */ var PR_NUMBER = process.env.CIRCLE_PR_NUMBER || process.env.APPVEYOR_PULL_REQUEST_NUMBER; /** * Checks to see if this is a pull request or not. * * @alias ci.IS_PR */ var IS_PR = !isNaN(parseInt(PR_NUMBER, 10)); /** * google-cloud-node root directory.. useful in case we need to cd */ var ROOT_DIR = path.join(__dirname, '..'); module.exports.ROOT_DIR = ROOT_DIR; /** * Helper class to make install dependencies + running tests easier to read * and less error prone. * * @class Module * @param {string} name - The module name (e.g. common, bigquery, etc.) */ function Module(name) { if (!(this instanceof Module)) { return new Module(name); } this.name = name; this.directory = path.join(ROOT_DIR, 'packages', name); var pkgJson = require(path.join(this.directory, 'package.json')); this.packageName = pkgJson.name; this.dependencies = Object.keys(pkgJson.devDependencies || {}); } /** * Umbrella module name. * * @static */ Module.UMBRELLA = 'google-cloud'; /** * Retrieves a list of modules that are ahead of origin/master. We do this by * creating a temporary remote branch that points official master branch. * We then do a git diff against the two to get a list of files. From there we * only care about either JS or JSON files being changed. * * @static * @return {Module[]} modules - The updated modules. */ Module.getUpdated = function() { var command = 'git'; var args = ['diff']; if (!isPushToMaster()) { var remotes = spawn('git', ['remote', '-v'], { cwd: ROOT_DIR, stdio: null }); var remotesStdout = remotes.stdout && remotes.stdout.toString(); if (remotesStdout && remotesStdout.indexOf('temp') === -1) { run([ 'git remote add temp', 'https://github.com/GoogleCloudPlatform/google-cloud-node.git' ]); run('git fetch -q temp'); } args.push('HEAD', 'temp/master'); } else { args.push('HEAD^'); } args.push('--name-only'); console.log(command, args.join(' ')); // There's a Windows bug where child_process.exec exits early on `git diff` // which in turn does not return all of the files that have changed. This can // cause a false positive when checking for package changes on AppVeyor var output = spawn(command, args, { cwd: ROOT_DIR, stdio: null }); if (output.status || output.error) { console.error(output.error || output.stderr.toString()); exit(output.status || 1); } var files = output.stdout.toString(); console.log(files); var modules = files .trim() .split('\n') .filter(function(file) { return /^packages\/.+\.js/.test(file); }) .filter(function(file) { // Repo-extraction can make packages disappear, so it's safe to exclude // these from being considered "updated"-- they're just gone. Gone! 
return test('-e', file); }) .map(function(file) { return file.split('/')[1]; }); return uniq(modules).map(Module); }; /** * Builds docs for all modules * * @static */ Module.buildDocs = function() { run('npm run docs', { cwd: ROOT_DIR }); }; /** * Returns a list containing ALL the modules. * * @static * @return {Module[]} modules - All of em'! */ Module.getAll = function() { cd(ROOT_DIR); return globby .sync('*', { cwd: 'packages' }) .map(Module); }; /** * Returns a list of modules that are dependent on one or more of the modules * specified. * * @static * @param {Module[]} modules - The dependency modules. * @return {Module[]} modules - The dependent modules. */ Module.getDependents = function(modules) { return Module.getAll().filter(function(mod) { return mod.hasDeps(modules); }); }; /** * Installs dependencies for all the modules! * * @static */ Module.installAll = function() { run('npm run postinstall', { cwd: ROOT_DIR }); }; /** * Generates an lcov coverage report for the specified modules. * * @static */ Module.runCoveralls = function() { run('npm run coveralls', { cwd: ROOT_DIR }); }; /** * Installs this modules dependencies via `npm install` */ Module.prototype.install = function() { run('npm install', { cwd: this.directory }); }; /** * Creates/uses symlink for a module (depending on if module was provided) * via `npm link` * * @param {Module=} mod - The module to use with `npm link ${mod.packageName}` */ Module.prototype.link = function(mod) { run(['npm link', mod && mod.packageName || ''], { cwd: this.directory }); }; /** * Runs unit tests for this module via `npm run test` */ Module.prototype.runUnitTests = function() { run('npm run test', { cwd: this.directory }); }; /** * Runs snippet tests for this module. */ Module.prototype.runSnippetTests = function() { process.env.TEST_MODULE = this.name; run('npm run snippet-test', { cwd: ROOT_DIR }); delete process.env.TEST_MODULE; }; /** * Runs system tests for this module via `npm run system-test` */ Module.prototype.runSystemTests = function() { var modulesExcludedFromSystemTests = [ 'common', 'common-grpc', 'dlp', 'error-reporting', 'google-cloud', 'monitoring', 'speech', 'video-intelligence' ]; if (modulesExcludedFromSystemTests.indexOf(this.name) > -1) { return; } run('npm run system-test', { cwd: this.directory }); }; /** * Checks to see if this module has one or more of the supplied modules * as a dev dependency. * * @param {Module[]} modules - The modules to check for. * @return {boolean} */ Module.prototype.hasDeps = function(modules) { var packageName; for (var i = 0; i < modules.length; i++) { packageName = modules[i].packageName; if (this.dependencies.indexOf(packageName) > -1) { return true; } }
}; module.exports.Module = Module; /** * Exec's command via child_process.spawnSync. * By default all output will be piped to the console unless `stdio` * is overridden. * * @param {string} command - The command to run. * @param {object=} options - Options to pass to `spawnSync`. * @return {string|null} */ function run(command, options) { options = options || {}; if (Array.isArray(command)) { command = command.join(' '); } console.log(command); var response = exec(command.trim(), options); if (response.code) { exit(response.code); } return response.stdout; } module.exports.run = run; /** * Used to make committing to git easier/etc.. * * @param {string=} cwd - Directory to commit/add/push from. */ function Git(cwd) { this.cwd = cwd || ROOT_DIR; this.branch = { current: IS_PR ? 'FETCH_HEAD' : run('git rev-parse --abbrev-ref HEAD', { cwd: this.cwd }).trim() }; } // ssh fails on AppVeyor Git.REPO = 'https://github.com/GoogleCloudPlatform/google-cloud-node.git'; /** * Checks out a branch. * * @param {string} branch - The branch to check out. */ Git.prototype.checkout = function(branch) { // trying to checkout the same branch.. so we'll skip it if (this.branch.current === branch) { this.branch.previous = branch; return; } // checking out previous branch.. check if they are the same and skip if (branch === '-' && this.branch.current === this.branch.previous) { delete this.branch.previous; return; } run(['git checkout', branch], { cwd: this.cwd }); this.branch.previous = this.branch.current; this.branch.current = branch; }; /** * Creates a submodule in the root directory in quiet mode. * * @param {string} branch - The branch to use. * @param {string=} alias - Name of the folder that contains submodule. * @return {Git} */ Git.prototype.submodule = function(branch, alias) { alias = alias || branch; var submodulePath = path.join(this.cwd, alias); if (!test('-d', submodulePath)) { run(['git submodule add -q -b', branch, Git.REPO, alias], { cwd: this.cwd }); } var git = new Git(submodulePath); git.branch.current = branch; git.alias = alias; return git; }; /** * Check to see if git has any files it can commit. * * @return {boolean} */ Git.prototype.hasUpdates = function() { var output = run('git status --porcelain', { cwd: this.cwd }); return !!output && output.trim().length > 0; }; /** * Sets git user * * @param {string} name - User name * @param {string} email - User email */ Git.prototype.setUser = function(name, email) { run(['git config --global user.name', name], { cwd: this.cwd }); run(['git config --global user.email', email], { cwd: this.cwd }); }; /** * Adds all files passed in via git add * * @param {...string} file - File to add */ Git.prototype.add = function() { var files = [].slice.call(arguments); var command = ['git add'].concat(files); run(command, { cwd: this.cwd }); }; /** * Removes files via git * * @param {string=} options - Command line options like -rf * @param {...string} file - File to remove. */ Git.prototype.remove = function() { var files = [].slice.call(arguments); var command = ['git rm'].concat(files); run(command, { cwd: this.cwd }); }; /** * Commits to git via commit message. * * @param {string} message - The commit message. */ Git.prototype.commit = function(message) { run(['git commit -m', '"' + message + ' [ci skip]"'], { cwd: this.cwd }); }; /** * Runs git status and pushes changes in quiet mode. * * @param {string} branch - The branch to push to. 
*/ Git.prototype.push = function(branch) { run('git status', { cwd: this.cwd }); run(['git push -q', Git.REPO, branch], { cwd: this.cwd }); }; /** * Deinits a submodule. * * @param {git} submodule - The submodule instance. */ Git.prototype.deinit = function(submodule) { var options = { cwd: this.cwd }; run(['git submodule deinit -f', submodule.alias], options); run(['git rm -rf', submodule.alias], options); run('git rm -rf .gitmodules', options); rm('-rf', path.resolve('.git/modules', submodule.alias)); }; module.exports.git = new Git(); /** * The name of the branch currently being tested. * * @alias ci.BRANCH */ var BRANCH = process.env.CIRCLE_BRANCH || process.env.APPVEYOR_REPO_BRANCH; /** * Returns the tag name (assuming this is a release) * * @alias ci.getTagName * @return {string|null} */ function getTagName() { return process.env.CIRCLE_TAG || process.env.APPVEYOR_REPO_TAG_NAME; } /** * Let's us know whether or not this is a release. * * @alias ci.isReleaseBuild * @return {string|null} */ function isReleaseBuild() { return !!getTagName(); } /** * Returns name/version of release. * * @alias ci.getRelease * @return {object|null} */ function getRelease() { var tag = getTagName(); if (!tag) { return null; } var parts = tag.split('-'); return { version: parts.pop(), name: parts.pop() || Module.UMBRELLA }; } /** * Checks to see if this is a push to master. * * @alias ci.isPushToMaster * @return {boolean} */ function isPushToMaster() { return BRANCH === 'master' && !IS_PR; } /** * Checks to see if this the CI's first pass (Circle only). * * @alias ci.isFirstPass * @return {boolean} */ function isFirstPass() { return process.env.CIRCLECI && /^v4/.test(process.version); } module.exports.ci = { BRANCH: BRANCH, IS_PR: IS_PR, PR_NUMBER: PR_NUMBER, getTagName: getTagName, isReleaseBuild: isReleaseBuild, getRelease: getRelease, isPushToMaster: isPushToMaster, isFirstPass: isFirstPass };
return false;
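Module.getUpdated boils down to "git diff --name-only against a reference, keep JS/JSON files under packages/, and dedupe the package names". A simplified, language-neutral sketch of the same idea in Python; the reference name and repo layout are assumptions taken from the script above, and the shelljs existence check is omitted:

import re
import subprocess

def updated_packages(ref="temp/master", cwd="."):
    # Equivalent of: git diff HEAD temp/master --name-only
    out = subprocess.run(
        ["git", "diff", "HEAD", ref, "--name-only"],
        cwd=cwd, capture_output=True, text=True, check=True,
    ).stdout
    names = set()
    for line in out.splitlines():
        # Same filter as the JS version: files under packages/ ending in .js/.json.
        if re.match(r"^packages/.+\.js", line):
            names.add(line.split("/")[1])
    return sorted(names)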
clients.py
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See LICENSE in the project root # for license information. from __future__ import absolute_import, division, print_function, unicode_literals import atexit import os import sys import debugpy from debugpy import adapter, common, launcher from debugpy.common import compat, fmt, json, log, messaging, sockets from debugpy.common.compat import unicode from debugpy.adapter import components, servers, sessions class Client(components.Component): """Handles the client side of a debug session.""" message_handler = components.Component.message_handler class Capabilities(components.Capabilities): PROPERTIES = { "supportsVariableType": False, "supportsVariablePaging": False, "supportsRunInTerminalRequest": False, "supportsMemoryReferences": False, } class Expectations(components.Capabilities): PROPERTIES = { "locale": "en-US", "linesStartAt1": True, "columnsStartAt1": True, "pathFormat": json.enum("path", optional=True), # we don't support "uri" } def __init__(self, sock): if sock == "stdio": log.info("Connecting to client over stdio...", self) stream = messaging.JsonIOStream.from_stdio() # Make sure that nothing else tries to interfere with the stdio streams # that are going to be used for DAP communication from now on. sys.stdin = stdin = open(os.devnull, "r") atexit.register(stdin.close) sys.stdout = stdout = open(os.devnull, "w") atexit.register(stdout.close) else: stream = messaging.JsonIOStream.from_socket(sock) with sessions.Session() as session: super(Client, self).__init__(session, stream) self.client_id = None """ID of the connecting client. This can be 'test' while running tests.""" self.has_started = False """Whether the "launch" or "attach" request was received from the client, and fully handled. """ self.start_request = None """The "launch" or "attach" request as received from the client. """ self._initialize_request = None """The "initialize" request as received from the client, to propagate to the server later.""" self._deferred_events = [] """Deferred events from the launcher and the server that must be propagated only if and when the "launch" or "attach" response is sent. """ self._known_subprocesses = set() """servers.Connection instances for subprocesses that this client has been made aware of. """ session.client = self session.register() # For the transition period, send the telemetry events with both old and new # name. The old one should be removed once the new one lights up. self.channel.send_event( "output", { "category": "telemetry", "output": "ptvsd", "data": {"packageVersion": debugpy.__version__}, }, ) self.channel.send_event( "output", { "category": "telemetry", "output": "debugpy", "data": {"packageVersion": debugpy.__version__}, }, ) def propagate_after_start(self, event): # pydevd starts sending events as soon as we connect, but the client doesn't # expect to see any until it receives the response to "launch" or "attach" # request. If client is not ready yet, save the event instead of propagating # it immediately. 
if self._deferred_events is not None: self._deferred_events.append(event) log.debug("Propagation deferred.") else: self.client.channel.propagate(event) def _propagate_deferred_events(self): log.debug("Propagating deferred events to {0}...", self.client) for event in self._deferred_events: log.debug("Propagating deferred {0}", event.describe()) self.client.channel.propagate(event) log.info("All deferred events propagated to {0}.", self.client) self._deferred_events = None # Generic event handler. There are no specific handlers for client events, because # there are no events from the client in DAP - but we propagate them if we can, in # case some events appear in future protocol versions. @message_handler def event(self, event): if self.server: self.server.channel.propagate(event) # Generic request handler, used if there's no specific handler below. @message_handler def request(self, request): return self.server.channel.delegate(request) @message_handler def initialize_request(self, request): if self._initialize_request is not None: raise request.isnt_valid("Session is already initialized") self.client_id = request("clientID", "") self.capabilities = self.Capabilities(self, request) self.expectations = self.Expectations(self, request) self._initialize_request = request exception_breakpoint_filters = [ { "filter": "raised", "label": "Raised Exceptions", "default": False, "description": "Break whenever any exception is raised.", }, { "filter": "uncaught", "label": "Uncaught Exceptions", "default": True, "description": "Break when the process is exiting due to unhandled exception.", }, { "filter": "userUnhandled", "label": "User Uncaught Exceptions", "default": False, "description": "Break when exception escapes into library code.", }, ] return { "supportsCompletionsRequest": True, "supportsConditionalBreakpoints": True, "supportsConfigurationDoneRequest": True, "supportsDebuggerProperties": True, "supportsDelayedStackTraceLoading": True, "supportsEvaluateForHovers": True, "supportsExceptionInfoRequest": True, "supportsExceptionOptions": True, "supportsFunctionBreakpoints": True, "supportsHitConditionalBreakpoints": True, "supportsLogPoints": True, "supportsModulesRequest": True, "supportsSetExpression": True, "supportsSetVariable": True, "supportsValueFormattingOptions": True, "supportsTerminateDebuggee": True, "supportsGotoTargetsRequest": True, "supportsClipboardContext": True, "exceptionBreakpointFilters": exception_breakpoint_filters, "supportsStepInTargetsRequest": True, } # Common code for "launch" and "attach" request handlers. # # See https://github.com/microsoft/vscode/issues/4902#issuecomment-368583522 # for the sequence of request and events necessary to orchestrate the start. 
def _start_message_handler(f): @components.Component.message_handler def handle(self, request): assert request.is_request("launch", "attach") if self._initialize_request is None: raise request.isnt_valid("Session is not initialized yet") if self.launcher or self.server: raise request.isnt_valid("Session is already started") self.session.no_debug = request("noDebug", json.default(False)) if self.session.no_debug: servers.dont_wait_for_first_connection() self.session.debug_options = debug_options = set( request("debugOptions", json.array(unicode)) ) f(self, request) if request.response is not None: return if self.server: self.server.initialize(self._initialize_request) self._initialize_request = None arguments = request.arguments if self.launcher: redirecting = arguments.get("console") == "internalConsole" if "RedirectOutput" in debug_options: # The launcher is doing output redirection, so we don't need the # server to do it, as well. arguments = dict(arguments) arguments["debugOptions"] = list( debug_options - {"RedirectOutput"} ) redirecting = True if arguments.get("redirectOutput"): arguments = dict(arguments) del arguments["redirectOutput"] redirecting = True arguments["isOutputRedirected"] = redirecting # pydevd doesn't send "initialized", and responds to the start request # immediately, without waiting for "configurationDone". If it changes # to conform to the DAP spec, we'll need to defer waiting for response. try: self.server.channel.request(request.command, arguments) except messaging.NoMoreMessages: # Server closed connection before we could receive the response to # "attach" or "launch" - this can happen when debuggee exits shortly # after starting. It's not an error, but we can't do anything useful # here at this point, either, so just bail out. request.respond({}) self.session.finalize( fmt( "{0} disconnected before responding to {1!j}", self.server, request.command, ) ) return except messaging.MessageHandlingError as exc: exc.propagate(request) if self.session.no_debug: self.start_request = request self.has_started = True request.respond({}) self._propagate_deferred_events() return if "clientOS" in request: client_os = request("clientOS", json.enum("windows", "unix")).upper() elif {"WindowsClient", "Windows"} & debug_options: client_os = "WINDOWS" elif {"UnixClient", "UNIX"} & debug_options: client_os = "UNIX" else: client_os = "WINDOWS" if sys.platform == "win32" else "UNIX" self.server.channel.request( "setDebuggerProperty", { "skipSuspendOnBreakpointException": ("BaseException",), "skipPrintBreakpointException": ("NameError",), "multiThreadsSingleNotification": True, "ideOS": client_os, }, ) # Let the client know that it can begin configuring the adapter. self.channel.send_event("initialized") self.start_request = request return messaging.NO_RESPONSE # will respond on "configurationDone" return handle @_start_message_handler def launch_request(self, request): from debugpy.adapter import launchers if self.session.id != 1 or len(servers.connections()): raise request.cant_handle('"attach" expected') debug_options = set(request("debugOptions", json.array(unicode))) # Handling of properties that can also be specified as legacy "debugOptions" flags. # If property is explicitly set to false, but the flag is in "debugOptions", treat # it as an error. Returns None if the property wasn't explicitly set either way. 
def property_or_debug_option(prop_name, flag_name): assert prop_name[0].islower() and flag_name[0].isupper() value = request(prop_name, bool, optional=True) if value == (): value = None if flag_name in debug_options: if value is False: raise request.isnt_valid( '{0!j}:false and "debugOptions":[{1!j}] are mutually exclusive', prop_name, flag_name, ) value = True return value # "pythonPath" is a deprecated legacy spelling. If "python" is missing, then try # the alternative. But if both are missing, the error message should say "python". python_key = "python" if python_key in request: if "pythonPath" in request: raise request.isnt_valid( '"pythonPath" is not valid if "python" is specified' ) elif "pythonPath" in request: python_key = "pythonPath" python = request(python_key, json.array(unicode, vectorize=True, size=(0,))) if not len(python): python = [compat.filename(sys.executable)] python += request("pythonArgs", json.array(unicode, size=(0,))) request.arguments["pythonArgs"] = python[1:] request.arguments["python"] = python launcher_python = request("debugLauncherPython", unicode, optional=True) if launcher_python == (): launcher_python = python[0] program = module = code = () if "program" in request: program = request("program", unicode) args = [program] request.arguments["processName"] = program if "module" in request: module = request("module", unicode) args = ["-m", module] request.arguments["processName"] = module if "code" in request: code = request("code", json.array(unicode, vectorize=True, size=(1,))) args = ["-c", "\n".join(code)] request.arguments["processName"] = "-c" num_targets = len([x for x in (program, module, code) if x != ()]) if num_targets == 0: raise request.isnt_valid( 'either "program", "module", or "code" must be specified' ) elif num_targets != 1: raise request.isnt_valid( '"program", "module", and "code" are mutually exclusive' ) # Propagate "args" via CLI if and only if shell expansion is requested. args_expansion = request( "argsExpansion", json.enum("shell", "none", optional=True) ) if args_expansion == "shell": args += request("args", json.array(unicode)) request.arguments.pop("args", None) cwd = request("cwd", unicode, optional=True) if cwd == (): # If it's not specified, but we're launching a file rather than a module, # and the specified path has a directory in it, use that. cwd = None if program == () else (os.path.dirname(program) or None) console = request( "console", json.enum( "internalConsole", "integratedTerminal", "externalTerminal", optional=True, ), ) console_title = request("consoleTitle", json.default("Python Debug Console")) sudo = bool(property_or_debug_option("sudo", "Sudo")) if sudo and sys.platform == "win32": raise request.cant_handle('"sudo":true is not supported on Windows.') launcher_path = request("debugLauncherPath", os.path.dirname(launcher.__file__)) adapter_host = request("debugAdapterHost", "127.0.0.1") try: servers.serve(adapter_host) except Exception as exc: raise request.cant_handle( "{0} couldn't create listener socket for servers: {1}", self.session, exc, ) launchers.spawn_debuggee( self.session, request, [launcher_python], launcher_path, adapter_host, args, cwd, console, console_title, sudo, ) @_start_message_handler def attach_request(self, request):
@message_handler def configurationDone_request(self, request): if self.start_request is None or self.has_started: request.cant_handle( '"configurationDone" is only allowed during handling of a "launch" ' 'or an "attach" request' ) try: self.has_started = True try: result = self.server.channel.delegate(request) except messaging.NoMoreMessages: # Server closed connection before we could receive the response to # "configurationDone" - this can happen when debuggee exits shortly # after starting. It's not an error, but we can't do anything useful # here at this point, either, so just bail out. request.respond({}) self.start_request.respond({}) self.session.finalize( fmt( "{0} disconnected before responding to {1!j}", self.server, request.command, ) ) return else: request.respond(result) except messaging.MessageHandlingError as exc: self.start_request.cant_handle(str(exc)) finally: if self.start_request.response is None: self.start_request.respond({}) self._propagate_deferred_events() # Notify the client of any child processes of the debuggee that aren't already # being debugged. for conn in servers.connections(): if conn.server is None and conn.ppid == self.session.pid: self.notify_of_subprocess(conn) @message_handler def evaluate_request(self, request): propagated_request = self.server.channel.propagate(request) def handle_response(response): request.respond(response.body) propagated_request.on_response(handle_response) return messaging.NO_RESPONSE @message_handler def pause_request(self, request): request.arguments["threadId"] = "*" return self.server.channel.delegate(request) @message_handler def continue_request(self, request): request.arguments["threadId"] = "*" try: return self.server.channel.delegate(request) except messaging.NoMoreMessages: # pydevd can sometimes allow the debuggee to exit before the queued # "continue" response gets sent. Thus, a failed "continue" response # indicating that the server disconnected should be treated as success. return {"allThreadsContinued": True} @message_handler def debugpySystemInfo_request(self, request): result = {"debugpy": {"version": debugpy.__version__}} if self.server: try: pydevd_info = self.server.channel.request("pydevdSystemInfo") except Exception: # If the server has already disconnected, or couldn't handle it, # report what we've got. 
pass else: result.update(pydevd_info) return result @message_handler def terminate_request(self, request): self.session.finalize('client requested "terminate"', terminate_debuggee=True) return {} @message_handler def disconnect_request(self, request): terminate_debuggee = request("terminateDebuggee", bool, optional=True) if terminate_debuggee == (): terminate_debuggee = None self.session.finalize('client requested "disconnect"', terminate_debuggee) return {} def notify_of_subprocess(self, conn): with self.session: if self.start_request is None or conn in self._known_subprocesses: return if "processId" in self.start_request.arguments: log.warning( "Not reporting subprocess for {0}, because the parent process " 'was attached to using "processId" rather than "port".', self.session, ) return log.info("Notifying {0} about {1}.", self, conn) body = dict(self.start_request.arguments) self._known_subprocesses.add(conn) for key in "processId", "listen", "preLaunchTask", "postDebugTask": body.pop(key, None) body["name"] = fmt("Subprocess {0}", conn.pid) body["request"] = "attach" body["subProcessId"] = conn.pid for key in "args", "processName", "pythonArgs": body.pop(key, None) host = body.pop("host", None) port = body.pop("port", None) if "connect" not in body: body["connect"] = {} if "host" not in body["connect"]: body["connect"]["host"] = host if host is not None else "127.0.0.1" if "port" not in body["connect"]: if port is None: _, port = listener.getsockname() body["connect"]["port"] = port self.channel.send_event("debugpyAttach", body) def serve(host, port): global listener listener = sockets.serve("Client", Client, host, port) return listener.getsockname() def stop_serving(): try: listener.close() except Exception: log.swallow_exception(level="warning")
if self.session.no_debug: raise request.isnt_valid('"noDebug" is not supported for "attach"') host = request("host", unicode, optional=True) port = request("port", int, optional=True) listen = request("listen", dict, optional=True) connect = request("connect", dict, optional=True) pid = request("processId", (int, unicode), optional=True) sub_pid = request("subProcessId", int, optional=True) if host != () or port != (): if listen != (): raise request.isnt_valid( '"listen" and "host"/"port" are mutually exclusive' ) if connect != (): raise request.isnt_valid( '"connect" and "host"/"port" are mutually exclusive' ) if listen != (): if connect != (): raise request.isnt_valid( '"listen" and "connect" are mutually exclusive' ) if pid != (): raise request.isnt_valid( '"listen" and "processId" are mutually exclusive' ) if sub_pid != (): raise request.isnt_valid( '"listen" and "subProcessId" are mutually exclusive' ) if pid != () and sub_pid != (): raise request.isnt_valid( '"processId" and "subProcessId" are mutually exclusive' ) if listen != (): host = listen("host", "127.0.0.1") port = listen("port", int) adapter.access_token = None host, port = servers.serve(host, port) else: host, port = servers.serve() # There are four distinct possibilities here. # # If "processId" is specified, this is attach-by-PID. We need to inject the # debug server into the designated process, and then wait until it connects # back to us. Since the injected server can crash, there must be a timeout. # # If "subProcessId" is specified, this is attach to a known subprocess, likely # in response to a "debugpyAttach" event. If so, the debug server should be # connected already, and thus the wait timeout is zero. # # If "listen" is specified, this is attach-by-socket with the server expected # to connect to the adapter via debugpy.connect(). There is no PID known in # advance, so just wait until the first server connection indefinitely, with # no timeout. # # If "connect" is specified, this is attach-by-socket in which the server has # spawned the adapter via debugpy.listen(). There is no PID known to the client # in advance, but the server connection should be either be there already, or # the server should be connecting shortly, so there must be a timeout. # # In the last two cases, if there's more than one server connection already, # this is a multiprocess re-attach. The client doesn't know the PID, so we just # connect it to the oldest server connection that we have - in most cases, it # will be the one for the root debuggee process, but if it has exited already, # it will be some subprocess. if pid != (): if not isinstance(pid, int): try: pid = int(pid) except Exception: raise request.isnt_valid('"processId" must be parseable as int') debugpy_args = request("debugpyArgs", json.array(unicode)) servers.inject(pid, debugpy_args) timeout = common.PROCESS_SPAWN_TIMEOUT pred = lambda conn: conn.pid == pid else: if sub_pid == (): pred = lambda conn: True timeout = common.PROCESS_SPAWN_TIMEOUT if listen == () else None else: pred = lambda conn: conn.pid == sub_pid timeout = 0 self.channel.send_event("debugpyWaitingForServer", {"host": host, "port": port}) conn = servers.wait_for_connection(self.session, pred, timeout) if conn is None: if sub_pid != (): # If we can't find a matching subprocess, it's not always an error - # it might have already exited, or didn't even get a chance to connect. # To prevent the client from complaining, pretend that the "attach" # request was successful, but that the session terminated immediately. 
request.respond({}) self.session.finalize( fmt('No known subprocess with "subProcessId":{0}', sub_pid) ) return raise request.cant_handle( ( "Timed out waiting for debug server to connect." if timeout else "There is no debug server connected to this adapter." ), sub_pid, ) try: conn.attach_to_session(self.session) except ValueError: request.cant_handle("{0} is already being debugged.", conn)
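The "listen" and "connect" attach modes described in the comments above correspond to two calls in debugpy's public API on the debuggee side. A hedged sketch of both, where the host and port are placeholders and only one alternative would be used at a time:

import debugpy

# Alternative A, client config uses "listen": the client-side adapter listens,
# and the debuggee dials out to it.
# debugpy.connect(("127.0.0.1", 5678))

# Alternative B, client config uses "connect": the debuggee spawns the adapter
# and waits for the client to attach.
debugpy.listen(("127.0.0.1", 5678))
debugpy.wait_for_client()   # block until the "attach" handshake completes
debugpy.breakpoint()        # pause here once a client is attached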
main.py
# Complete project details at https://RandomNerdTutorials.com
import gc  # used by the memory check in the server loop below
import socket
import network
import machine, onewire, ds18x20, time

sta_if = network.WLAN(network.STA_IF)
print(sta_if.ifconfig())

ds_pin = machine.Pin(4)
ds_sensor = ds18x20.DS18X20(onewire.OneWire(ds_pin))

def read_ds_sensor():
def web_page():
  temp = read_ds_sensor()
  html = """<!DOCTYPE HTML><html><head>
  <meta name="viewport" content="width=device-width, initial-scale=1">
  <meta http-equiv="refresh" content="60">
  <link rel="stylesheet" href="https://use.fontawesome.com/releases/v5.7.2/css/all.css" integrity="sha384-fnmOCqbTlWIlj8LyTjo7mOUStjsKC4pOpQbqyi7RrhN7udi9RwhKkMHpvLbHG9Sr" crossorigin="anonymous">
  <style>
    html { font-family: Arial; display: inline-block; margin: 0px auto; text-align: center; }
    h2 { font-size: 3.0rem; }
    p { font-size: 3.0rem; }
    .units { font-size: 1.2rem; }
    .ds-labels{ font-size: 1.5rem; vertical-align:middle; padding-bottom: 15px; }
  </style></head><body><h2>ESP with DS18B20</h2>
  <p><i class="fas fa-thermometer-half" style="color:#059e8a;"></i>
    <span class="ds-labels">Temperature</span>
    <span id="temperature">""" + str(temp) + """</span>
    <sup class="units">&deg;C</sup>
  </p>
  <p><i class="fas fa-thermometer-half" style="color:#059e8a;"></i>
    <span class="ds-labels">Temperature</span>
    <span id="temperature">""" + str(round(temp * (9/5) + 32.0, 2)) + """</span>
    <sup class="units">&deg;F</sup>
  </p></body></html>"""
  return html

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', 80))
s.listen(5)

while True:
  try:
    if gc.mem_free() < 102000:
      gc.collect()
    conn, addr = s.accept()
    conn.settimeout(3.0)
    print('Got a connection from %s' % str(addr))
    request = conn.recv(1024)
    conn.settimeout(None)
    request = str(request)
    print('Content = %s' % request)
    response = web_page()
    conn.send('HTTP/1.1 200 OK\n')
    conn.send('Content-Type: text/html\n')
    conn.send('Connection: close\n\n')
    conn.sendall(response)
    conn.close()
  except OSError as e:
    conn.close()
    print('Connection closed')
  roms = ds_sensor.scan()
  print('Found DS devices: ', roms)
  print('Temperatures: ')
  ds_sensor.convert_temp()
  # The DS18B20 needs roughly 750 ms to finish a 12-bit conversion before
  # read_temp() returns a valid value.
  time.sleep_ms(750)
  for rom in roms:
    temp = ds_sensor.read_temp(rom)
    if isinstance(temp, float):
      msg = round(temp, 2)
      print(temp, end=' ')
      print('Valid temperature')
      return msg
  # Return a float (not bytes) so web_page() can still format and convert it.
  return 0.0
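Once the board is serving, the page can be polled from a desktop machine. A small, assumed client (the IP address is a placeholder) that fetches the page above and extracts the Celsius reading from the first temperature span:

import re
import urllib.request

ESP_URL = "http://192.168.1.50/"   # placeholder address for the ESP board

html = urllib.request.urlopen(ESP_URL, timeout=5).read().decode()
# The page renders the value inside <span id="temperature">...</span>.
match = re.search(r'id="temperature">([-\d.]+)<', html)
if match:
    celsius = float(match.group(1))
    print("Temperature: %.2f C / %.2f F" % (celsius, celsius * 9 / 5 + 32))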
gzipfile.py
# -*- coding: utf-8 -*- """Gzip compressed stream file.""" # Note: do not rename file to gzip.py this can cause the exception: # AttributeError: 'module' object has no attribute 'GzipFile' # when using pip. import collections import os from dtfabric.runtime import fabric as dtfabric_fabric from dfvfs.compression import zlib_decompressor from dfvfs.lib import data_format from dfvfs.lib import errors class _GzipDecompressorState(object): """Deflate decompressor wrapper for reading a gzip member. This class encapsulates the state of a deflate decompression object, as well as the location of the decompressor's source data. Attributes: uncompressed_offset (int): offset into the uncompressed data in a gzip member last emitted by the state object. """ _MAXIMUM_READ_SIZE = 16 * 1024 * 1024 def __init__(self, stream_start): """Initializes a gzip member decompressor wrapper. Args: stream_start (int): offset to the compressed stream within the containing file object. """ self._compressed_data = b'' self._decompressor = zlib_decompressor.DeflateDecompressor() self._last_read = stream_start self.uncompressed_offset = 0 def Read(self, file_object): """Reads the next uncompressed data from the gzip stream. Args: file_object (FileIO): file object that contains the compressed stream. Returns: bytes: next uncompressed data from the compressed stream. """ file_object.seek(self._last_read, os.SEEK_SET) read_data = file_object.read(self._MAXIMUM_READ_SIZE) self._last_read = file_object.get_offset() compressed_data = b''.join([self._compressed_data, read_data]) decompressed_data, remaining_compressed_data = ( self._decompressor.Decompress(compressed_data)) self._compressed_data = remaining_compressed_data self.uncompressed_offset += len(decompressed_data) return decompressed_data def GetUnusedData(self): """Retrieves any bytes past the end of the compressed data. See https://docs.python.org/2/library/zlib.html#zlib.Decompress.unused_data Unused data can be any bytes after a Deflate compressed block (or chunk). Returns: bytes: data past the end of the compressed data, if any has been read from the gzip file. """ return self._decompressor.unused_data class GzipMember(data_format.DataFormat): """Gzip member. Gzip files have no index of members, so each member must be read sequentially before metadata and random seeks are possible. This class provides caching of gzip member data during the initial read of each member. Attributes: comment (str): comment stored in the member. member_end_offset (int): offset to the end of the member in the parent file object. member_start_offset (int): offset to the start of the member in the parent file object. operating_system (int): type of file system on which the compression took place. original_filename (str): original filename of the uncompressed file. uncompressed_data_offset (int): offset of the start of the uncompressed data in this member relative to the whole gzip file's uncompressed data. uncompressed_data_size (int): total size of the data in this gzip member after decompression. 
""" _DATA_TYPE_FABRIC_DEFINITION_FILE = os.path.join( os.path.dirname(__file__), 'gzipfile.yaml') with open(_DATA_TYPE_FABRIC_DEFINITION_FILE, 'rb') as file_object: _DATA_TYPE_FABRIC_DEFINITION = file_object.read() _DATA_TYPE_FABRIC = dtfabric_fabric.DataTypeFabric( yaml_definition=_DATA_TYPE_FABRIC_DEFINITION) _MEMBER_HEADER = _DATA_TYPE_FABRIC.CreateDataTypeMap( 'gzip_member_header') _MEMBER_HEADER_SIZE = _MEMBER_HEADER.GetByteSize() _MEMBER_FOOTER = _DATA_TYPE_FABRIC.CreateDataTypeMap( 'gzip_member_footer') _MEMBER_FOOTER_SIZE = _MEMBER_FOOTER.GetByteSize() _UINT16LE = _DATA_TYPE_FABRIC.CreateDataTypeMap('uint16le') _UINT16LE_SIZE = _UINT16LE.GetByteSize() _CSTRING = _DATA_TYPE_FABRIC.CreateDataTypeMap('cstring') _GZIP_SIGNATURE = 0x8b1f _COMPRESSION_METHOD_DEFLATE = 8 _FLAG_FTEXT = 0x01 _FLAG_FHCRC = 0x02 _FLAG_FEXTRA = 0x04 _FLAG_FNAME = 0x08 _FLAG_FCOMMENT = 0x10 # The maximum size of the uncompressed data cache. _UNCOMPRESSED_DATA_CACHE_SIZE = 2 * 1024 * 1024 def __init__( self, file_object, member_start_offset, uncompressed_data_offset): """Initializes a gzip member. Args: file_object (FileIO): file-like object, containing the gzip member. member_start_offset (int): offset to the beginning of the gzip member in the containing file. uncompressed_data_offset (int): offset of the start of the uncompressed data in this member relative to the whole gzip file's uncompressed data. """ self._cache = b'' # End offset of the cached uncompressed data of the member. self._cache_end_offset = None # Start offset of the cached uncompressed data of the member. self._cache_start_offset = None self.comment = None self.modification_time = None self.operating_system = None self.original_filename = None file_size = file_object.get_size() file_object.seek(member_start_offset, os.SEEK_SET) self._ReadMemberHeader(file_object) data_offset = 0 uncompressed_data_size = 0 compressed_data_offset = file_object.get_offset() decompressor_state = _GzipDecompressorState(compressed_data_offset) # Read the member data to determine the uncompressed data size and # the offset of the member footer. file_offset = compressed_data_offset while file_offset < file_size: data_offset += uncompressed_data_size decompressed_data = decompressor_state.Read(file_object) uncompressed_data_size += len(decompressed_data) # Note that unused data will be set when the decompressor reads beyond # the end of the compressed data stream. unused_data = decompressor_state.GetUnusedData() if unused_data: file_object.seek(-len(unused_data), os.SEEK_CUR) file_offset = file_object.get_offset() break file_offset = file_object.get_offset() # Do not read the the last member footer if it is missing, which is # a common corruption scenario. if file_offset < file_size: self._ReadStructure( file_object, file_offset, self._MEMBER_FOOTER_SIZE, self._MEMBER_FOOTER, 'member footer') member_end_offset = file_object.get_offset() # Initialize the member with data. self._file_object = file_object self._file_object.seek(member_start_offset, os.SEEK_SET) # Cache uncompressed data of gzip files that fit entirely in the cache. if (data_offset == 0 and uncompressed_data_size < self._UNCOMPRESSED_DATA_CACHE_SIZE): self._cache = decompressed_data self._cache_start_offset = 0 self._cache_end_offset = uncompressed_data_size # Offset to the beginning of the compressed data in the file object. 
self._compressed_data_start = compressed_data_offset self._decompressor_state = _GzipDecompressorState(compressed_data_offset) # Offset to the start of the member in the parent file object. self.member_start_offset = member_start_offset # Offset to the end of the member in the parent file object. self.member_end_offset = member_end_offset # Total size of the data in this gzip member after decompression. self.uncompressed_data_size = uncompressed_data_size # Offset of the start of the uncompressed data in this member relative to # the whole gzip file's uncompressed data. self.uncompressed_data_offset = uncompressed_data_offset def _GetCacheSize(self):
def _IsCacheFull(self): """Checks whether the uncompressed data cache is full. Returns: bool: True if the cache is full. """ return self._GetCacheSize() >= self._UNCOMPRESSED_DATA_CACHE_SIZE def _LoadDataIntoCache(self, file_object, minimum_offset): """Reads and decompresses the data in the member. This function already loads as much data as possible in the cache, up to UNCOMPRESSED_DATA_CACHE_SIZE bytes. Args: file_object (FileIO): file-like object. minimum_offset (int): offset into this member's uncompressed data at which the cache should start. """ # Decompression can only be performed from beginning to end of the stream. # So, if data before the current position of the decompressor in the stream # is required, it's necessary to throw away the current decompression # state and start again. if minimum_offset < self._decompressor_state.uncompressed_offset: self._ResetDecompressorState() cache_is_full = self._IsCacheFull() while not cache_is_full: decompressed_data = self._decompressor_state.Read(file_object) # Note that decompressed_data will be empty if there is no data left # to read and decompress. if not decompressed_data: break decompressed_data_length = len(decompressed_data) decompressed_end_offset = self._decompressor_state.uncompressed_offset decompressed_start_offset = ( decompressed_end_offset - decompressed_data_length) data_to_add = decompressed_data added_data_start_offset = decompressed_start_offset if decompressed_start_offset < minimum_offset: data_to_add = None if decompressed_start_offset < minimum_offset < decompressed_end_offset: data_add_offset = decompressed_end_offset - minimum_offset data_to_add = decompressed_data[-data_add_offset:] added_data_start_offset = decompressed_end_offset - data_add_offset if data_to_add and not cache_is_full: self._cache = b''.join([self._cache, data_to_add]) if self._cache_start_offset is None: self._cache_start_offset = added_data_start_offset if self._cache_end_offset is None: self._cache_end_offset = self._cache_start_offset + len(data_to_add) else: self._cache_end_offset += len(data_to_add) cache_is_full = self._IsCacheFull() # If there's no more data in the member, the unused_data value is # populated in the decompressor. When this situation arises, we rewind # to the end of the compressed_data section. unused_data = self._decompressor_state.GetUnusedData() if unused_data: seek_offset = -len(unused_data) file_object.seek(seek_offset, os.SEEK_CUR) self._ResetDecompressorState() break def _ReadMemberHeader(self, file_object): """Reads a member header. Args: file_object (FileIO): file-like object to read from. Raises: FileFormatError: if the member header cannot be read. 
""" file_offset = file_object.get_offset() member_header = self._ReadStructure( file_object, file_offset, self._MEMBER_HEADER_SIZE, self._MEMBER_HEADER, 'member header') if member_header.signature != self._GZIP_SIGNATURE: raise errors.FileFormatError( 'Unsupported signature: 0x{0:04x}.'.format(member_header.signature)) if member_header.compression_method != self._COMPRESSION_METHOD_DEFLATE: raise errors.FileFormatError( 'Unsupported compression method: {0:d}.'.format( member_header.compression_method)) self.modification_time = member_header.modification_time self.operating_system = member_header.operating_system if member_header.flags & self._FLAG_FEXTRA: file_offset = file_object.get_offset() extra_field_data_size = self._ReadStructure( file_object, file_offset, self._UINT16LE_SIZE, self._UINT16LE, 'extra field data size') file_object.seek(extra_field_data_size, os.SEEK_CUR) if member_header.flags & self._FLAG_FNAME: file_offset = file_object.get_offset() string_value = self._ReadString( file_object, file_offset, self._CSTRING, 'original filename') self.original_filename = string_value.rstrip('\x00') if member_header.flags & self._FLAG_FCOMMENT: file_offset = file_object.get_offset() string_value = self._ReadString( file_object, file_offset, self._CSTRING, 'comment') self.comment = string_value.rstrip('\x00') if member_header.flags & self._FLAG_FHCRC: file_object.read(2) def _ResetDecompressorState(self): """Resets the state of the internal decompression object.""" self._decompressor_state = _GzipDecompressorState( self._compressed_data_start) def FlushCache(self): """Empties the cache that holds cached decompressed data.""" self._cache = b'' self._cache_start_offset = None self._cache_end_offset = None self._ResetDecompressorState() def ReadAtOffset(self, offset, size=None): """Reads a byte string from the gzip member at the specified offset. The function will read a byte string of the specified size or all of the remaining data if no size was specified. Args: offset (int): offset within the uncompressed data in this member to read from. size (Optional[int]): maximum number of bytes to read, where None represents all remaining data, to a maximum of the uncompressed cache size. Returns: bytes: data read. Raises: IOError: if the read failed. ValueError: if a negative read size or offset is specified. """ if size is not None and size < 0: raise ValueError('Invalid size value {0!s}'.format(size)) if offset < 0: raise ValueError('Invalid offset value {0!s}'.format(offset)) if size == 0 or offset >= self.uncompressed_data_size: return b'' if self._cache_start_offset is None: self._LoadDataIntoCache(self._file_object, offset) if offset > self._cache_end_offset or offset < self._cache_start_offset: self.FlushCache() self._LoadDataIntoCache(self._file_object, offset) cache_offset = offset - self._cache_start_offset if not size: return self._cache[cache_offset:] data_end_offset = cache_offset + size if data_end_offset > self._cache_end_offset: return self._cache[cache_offset:] return self._cache[cache_offset:data_end_offset] class GzipCompressedStream(object): """File-like object of a gzip compressed stream (file). The gzip file format is defined in RFC1952: http://www.zlib.org/rfc-gzip.html Attributes: uncompressed_data_size (int): total size of the decompressed data stored in the gzip file. 
""" def __init__(self): """Initializes a file-like object.""" super(GzipCompressedStream, self).__init__() self._compressed_data_size = -1 self._current_offset = 0 self._file_object = None self._members_by_end_offset = collections.OrderedDict() self.uncompressed_data_size = 0 @property def members(self): """list(GzipMember): members in the gzip file.""" return list(self._members_by_end_offset.values()) def _GetMemberForOffset(self, offset): """Finds the member whose data includes the provided offset. Args: offset (int): offset in the uncompressed data to find the containing member for. Returns: GzipMember: gzip file member or None if not available. Raises: ValueError: if the provided offset is outside of the bounds of the uncompressed data. """ if offset < 0 or offset >= self.uncompressed_data_size: raise ValueError('Offset {0:d} is larger than file size {1:d}.'.format( offset, self.uncompressed_data_size)) for end_offset, member in self._members_by_end_offset.items(): if offset < end_offset: return member return None def Open(self, file_object): """Opens the file-like object defined by path specification. Args: file_object (FileIO): file-like object that contains the gzip compressed stream. Raises: IOError: if the file-like object could not be opened. OSError: if the file-like object could not be opened. """ file_size = file_object.get_size() file_object.seek(0, os.SEEK_SET) uncompressed_data_offset = 0 next_member_offset = 0 while next_member_offset < file_size: member = GzipMember( file_object, next_member_offset, uncompressed_data_offset) uncompressed_data_offset = ( uncompressed_data_offset + member.uncompressed_data_size) self._members_by_end_offset[uncompressed_data_offset] = member self.uncompressed_data_size += member.uncompressed_data_size next_member_offset = member.member_end_offset self._file_object = file_object # Note: that the following functions do not follow the style guide # because they are part of the file-like object interface. # pylint: disable=invalid-name def close(self): """Closes the file-like object.""" self._members_by_end_offset = [] if self._file_object: self._file_object = None def read(self, size=None): """Reads a byte string from the gzip file at the current offset. The function will read a byte string up to the specified size or all of the remaining data if no size was specified. Args: size (Optional[int]): number of bytes to read, where None is all remaining data. Returns: bytes: data read. Raises: IOError: if the read failed. OSError: if the read failed. """ data = b'' while ((size and len(data) < size) and self._current_offset < self.uncompressed_data_size): member = self._GetMemberForOffset(self._current_offset) member_offset = self._current_offset - member.uncompressed_data_offset data_read = member.ReadAtOffset(member_offset, size) if not data_read: break self._current_offset += len(data_read) data = b''.join([data, data_read]) return data def seek(self, offset, whence=os.SEEK_SET): """Seeks to an offset within the file-like object. Args: offset (int): offset to seek to. whence (Optional(int)): value that indicates whether offset is an absolute or relative position within the file. Raises: IOError: if the seek failed or the file has not been opened. OSError: if the seek failed or the file has not been opened. 
""" if not self._file_object: raise IOError('Not opened.') if whence == os.SEEK_CUR: offset += self._current_offset elif whence == os.SEEK_END: offset += self.uncompressed_data_size elif whence != os.SEEK_SET: raise IOError('Unsupported whence.') if offset < 0: raise IOError('Invalid offset value less than zero.') self._current_offset = offset def get_offset(self): """Retrieves the current offset into the file-like object. Returns: int: current offset into the file-like object. Raises: IOError: if the file-like object has not been opened. OSError: if the file-like object has not been opened. """ if not self._file_object: raise IOError('Not opened.') return self._current_offset def get_size(self): """Retrieves the size of the file-like object. Returns: int: size of the file-like object data. Raises: IOError: if the file-like object has not been opened. OSError: if the file-like object has not been opened. """ if not self._file_object: raise IOError('Not opened.') return self.uncompressed_data_size
"""Determines the size of the uncompressed cached data. Returns: int: number of cached bytes. """ if None in (self._cache_start_offset, self._cache_end_offset): return 0 return self._cache_end_offset - self._cache_start_offset
DeleteCustomerArgs.ts
import { ArgsType, Field } from "@nestjs/graphql"; import { CustomerWhereUniqueInput } from "./CustomerWhereUniqueInput";
} export { DeleteCustomerArgs };
@ArgsType() class DeleteCustomerArgs { @Field(() => CustomerWhereUniqueInput, { nullable: false }) where!: CustomerWhereUniqueInput;
CoursesNew.js
import React from "react" import styled from "styled-components" import { useStaticQuery, graphql, navigate } from "gatsby" import { CardGroup, Card, black, gray, TextButton } from "@vschool/lotus" import Link from "./QueryLink" const FlexCard = styled(Card)` display: flex; flex-direction: column; align-items: flex-start; padding-top: 32px; cursor: pointer; ` const Icon = styled.img` height: 64px; margin-top: 8px; ` const CourseName = styled.h5` color: ${black}; margin-bottom: 8px; ` const CourseInfo = styled.p` color: ${gray.darker}; font-weight: 300; font-size: 16px; line-height: 24px; text-align: left; padding-top: 8px; @media (min-width: 800px) { padding-top: 16px; font-size: 20px; line-height: 32px; } ` const StyledLink = styled(Link)` margin-top: auto; ` export default function
(props) { const data = useStaticQuery(graphql` { prismicCourseCards { data { body { ... on PrismicCourseCardsBodyCourse { id primary { button_text { text } button_url { url } course_icon { url alt } course_info { text } course_name { text } } } } } } } `) const courseCards = data.prismicCourseCards.data.body.map(course => ( <FlexCard key={course.id} onClick={() => navigate(course.primary.button_url.url)} > <Icon src={course.primary.course_icon.url} alt={course.primary.course_icon.alt} /> <CourseName>{course.primary.course_name.text}</CourseName> <CourseInfo>{course.primary.course_info.text}</CourseInfo> <StyledLink to={course.primary.button_url.url}> <TextButton>{course.primary.button_text.text}</TextButton> </StyledLink> </FlexCard> )) return <CardGroup>{courseCards}</CardGroup> }
CoursesNew
test_extractor.py
""" .. codeauthor:: Tsuyoshi Hombashi <[email protected]> """ import datetime from decimal import Decimal import pytest from typepy import DateTime, RealNumber, String, Typecode from dataproperty import ( Align, DataPropertyExtractor, Format, LineBreakHandling, MatrixFormatting, Preprocessor, ) from .common import get_strict_level_map DATATIME_DATA = datetime.datetime(2017, 1, 2, 3, 4, 5) nan = float("nan") inf = float("inf") @pytest.fixture def dp_extractor(): return DataPropertyExtractor() def datetime_formatter_test(value): return value.strftime("%Y%m%d %H%M%S") def datetime_formatter_tostr_0(value): return value.strftime("%Y-%m-%d %H:%M:%S%z") def datetime_formatter_tostr_1(value): return value.strftime("%Y/%m/%d %H:%M:%S") def trans_func_1(v): if v is None: return "" if v is False: return "false" if v == 0: return 123 return v def trans_func_2(v): if v == 123: return 321 return v def nop(v): return v class Test_DataPropertyExtractor_to_dp: @pytest.mark.parametrize( ["value", "type_value_map", "is_strict", "expected_value", "expected_typecode"], [ [None, {Typecode.NONE: None}, True, None, Typecode.NONE], [None, {Typecode.NONE: "null"}, False, "null", Typecode.STRING], [None, {Typecode.NONE: ""}, True, "", Typecode.NULL_STRING], [None, {Typecode.NONE: 0}, False, 0, Typecode.INTEGER], [inf, {Typecode.INFINITY: "INF_1"}, False, "INF_1", Typecode.STRING], [inf, {Typecode.INFINITY: "INF_2"}, True, "INF_2", Typecode.STRING], [inf, {Typecode.INFINITY: None}, True, None, Typecode.NONE], ["inf", {Typecode.INFINITY: "INF_3"}, False, "INF_3", Typecode.STRING], ["inf", {Typecode.INFINITY: "INF_4"}, True, "inf", Typecode.STRING], ["inf", {Typecode.INFINITY: inf}, False, Decimal("Infinity"), Typecode.INFINITY], [nan, {Typecode.NAN: "NAN_1"}, False, "NAN_1", Typecode.STRING], [nan, {Typecode.NAN: "NAN_2"}, True, "NAN_2", Typecode.STRING], [nan, {Typecode.NAN: None}, True, None, Typecode.NONE], ["nan", {Typecode.NAN: "NAN_4"}, False, "NAN_4", Typecode.STRING], ["nan", {Typecode.NAN: "NAN_5"}, True, "nan", Typecode.STRING], ], ) def test_normal_type_value_map( self, dp_extractor, value, type_value_map, is_strict, expected_value, expected_typecode ): dp_extractor.type_value_map = type_value_map dp_extractor.strict_level_map = get_strict_level_map(is_strict) dp = dp_extractor.to_dp(value) assert dp.data == expected_value assert dp.typecode == expected_typecode assert isinstance(dp.to_str(), str) @pytest.mark.parametrize( ["value", "datetime_formatter", "datetime_format_str", "is_strict", "expected"], [ [DATATIME_DATA, datetime_formatter_tostr_0, "s", False, "2017-01-02 03:04:05"], ["2017-01-01 00:00:00", datetime_formatter_tostr_1, "s", False, "2017/01/01 00:00:00"], [ "2017-01-01 00:00:00", None, "%Y-%m-%dT%H:%M:%S", False, datetime.datetime(2017, 1, 1, 0, 0, 0), ], ["2017-01-01 00:00:00", None, "s", True, "2017-01-01 00:00:00"], ], ) def test_normal_datetime( self, dp_extractor, value, datetime_formatter, datetime_format_str, is_strict, expected ): dp_extractor.datetime_formatter = datetime_formatter dp_extractor.datetime_format_str = datetime_format_str dp_extractor.strict_level_map = get_strict_level_map(is_strict) dp = dp_extractor.to_dp(value) assert dp.data == expected @pytest.mark.parametrize( ["value", "type_hint", "trans_func", "expected"], [ [1, String, nop, "1"], [0, String, nop, "0"], [None, String, nop, "None"], [0, String, trans_func_1, "123"], [False, String, trans_func_1, "false"], [None, String, trans_func_1, ""], ], ) def test_normal_type_hint(self, dp_extractor, value, type_hint, 
trans_func, expected): dp_extractor.register_trans_func(trans_func) dp = dp_extractor._DataPropertyExtractor__to_dp(value, type_hint=type_hint) assert dp.data == expected @pytest.mark.parametrize( ["value", "type_hint", "trans_funcs", "expected"], [ [0, String, [trans_func_2, trans_func_1], "321"], [0, String, [trans_func_1, trans_func_2], "123"], ], ) def test_normal_trans_funcs(self, dp_extractor, value, type_hint, trans_funcs, expected): for trans_func in trans_funcs: dp_extractor.register_trans_func(trans_func) dp = dp_extractor._DataPropertyExtractor__to_dp(value, type_hint=type_hint) assert dp.data == expected class Test_DataPropertyExtractor_to_dp_quoting_flags: ALWAYS_QUOTE_FLAG_MAP = { Typecode.NONE: True, Typecode.INTEGER: True, Typecode.REAL_NUMBER: True, Typecode.STRING: True, Typecode.NULL_STRING: True, Typecode.DATETIME: True, Typecode.REAL_NUMBER: True, Typecode.NAN: True, Typecode.BOOL: True, } @pytest.mark.parametrize( ["value", "quoting_flags", "expected"], [ ["string", ALWAYS_QUOTE_FLAG_MAP, '"string"'], ['"string"', ALWAYS_QUOTE_FLAG_MAP, '"string"'], [' "123"', ALWAYS_QUOTE_FLAG_MAP, ' "123"'], ['"string" ', ALWAYS_QUOTE_FLAG_MAP, '"string" '], [' "12 345" ', ALWAYS_QUOTE_FLAG_MAP, ' "12 345" '], ], ) def test_normal_always_quote(self, dp_extractor, value, quoting_flags, expected): dp_extractor.quoting_flags = quoting_flags dp = dp_extractor.to_dp(value) assert dp.data == expected class Test_DataPropertyExtractor_to_dp_matrix: @pytest.mark.parametrize( ["value"], [ [ [ ["山田", "太郎", "2001/1/1", "100-0002", "東京都千代田区皇居外苑", "03-1234-5678"], ["山田", "次郎", "2001/1/2", "251-0036", "神奈川県藤沢市江の島1丁目", "03-9999-9999"], ] ] ], ) def test_smoke(self, dp_extractor, value): assert len(list(dp_extractor.to_dp_matrix(value))) > 0 @pytest.mark.parametrize( ["value", "type_value_map", "datetime_formatter"], [ [ [[None, "1"], [1.1, "a"], [nan, inf], ["false", DATATIME_DATA]], {Typecode.NONE: "null", Typecode.INFINITY: "INFINITY", Typecode.NAN: "NAN"}, datetime_formatter_test, ] ], ) def test_normal(self, dp_extractor, value, type_value_map, datetime_formatter): dp_extractor.type_value_map = type_value_map dp_extractor.datetime_formatter = datetime_formatter dp_matrix = list(dp_extractor.to_dp_matrix(dp_extractor.to_dp_matrix(value))) assert len(dp_matrix) == 4 dp = dp_matrix[0][0] assert dp.data == "null" assert dp.typecode == Typecode.STRING assert dp.align.align_code == Align.LEFT.align_code assert dp.align.align_string == Align.LEFT.align_string assert dp.decimal_places is None assert dp.format_str == "{:s}" dp = dp_matrix[0][1] assert dp.data == 1 assert dp.typecode == Typecode.INTEGER assert dp.align.align_code == Align.RIGHT.align_code assert dp.align.align_string == Align.RIGHT.align_string assert dp.decimal_places == 0 assert dp.format_str == "{:d}" dp = dp_matrix[1][0] assert dp.data == Decimal("1.1") assert dp.typecode == Typecode.REAL_NUMBER assert dp.align.align_code == Align.RIGHT.align_code assert dp.align.align_string == Align.RIGHT.align_string assert dp.decimal_places == 1 assert dp.format_str == "{:.1f}" dp = dp_matrix[1][1] assert dp.data == "a" assert dp.typecode == Typecode.STRING assert dp.align.align_code == Align.LEFT.align_code assert dp.align.align_string == Align.LEFT.align_string assert dp.decimal_places is None assert dp.format_str == "{:s}" dp = dp_matrix[2][0] assert dp.data == "NAN" assert dp.typecode == Typecode.STRING assert dp.align.align_code == Align.LEFT.align_code assert dp.align.align_string == Align.LEFT.align_string assert dp.decimal_places is None 
assert dp.format_str == "{:s}" dp = dp_matrix[2][1] assert dp.data == "INFINITY" assert dp.typecode == Typecode.STRING assert dp.align.align_code == Align.LEFT.align_code assert dp.align.align_string == Align.LEFT.align_string assert dp.decimal_places is None assert dp.format_str == "{:s}" dp = dp_matrix[3][0] assert dp.data == "false" assert dp.typecode == Typecode.STRING assert dp.align.align_code == Align.LEFT.align_code assert dp.align.align_string == Align.LEFT.align_string assert dp.decimal_places is None assert dp.format_str == "{:s}" dp = dp_matrix[3][1] assert dp.data == "20170102 030405" assert dp.typecode == Typecode.STRING assert dp.align.align_code == Align.LEFT.align_code assert dp.align.align_string == Align.LEFT.align_string assert dp.decimal_places is None assert dp.format_str == "{:s}" @pytest.mark.parametrize(["value", "expected"], [[None, []], [[], []], [(), []]]) def test_empty(self, dp_extractor, value, expected): assert dp_extractor.to_dp_matrix(value) == expected class Test_DataPropertyExtractor_to_dp_list: @pytest.mark.parametrize( ["value", "float_type"], [[[0.
DATA_MATRIX = [ [1, 1.1, "aa", 1, 1, True, inf, nan, datetime.datetime(2017, 1, 1, 0, 0, 0)], [2, 2.2, "bbb", 2.2, 2.2, False, "inf", "nan", "2017-01-01T01:23:45+0900"], [3, 3.33, "cccc", -3, "ccc", True, "infinity", "NAN", "2017-11-01 01:23:45+0900"], ] TEST_DATA_MATRIX_TUPLE = ( (1, 1.1, "aa", 1, 1, True, inf, nan, datetime.datetime(2017, 1, 1, 0, 0, 0)), (2, 2.2, "bbb", 2.2, 2.2, False, "inf", "nan", "2017-01-01T01:23:45+0900"), (3, 3.33, "cccc", -3, "ccc", True, "infinity", "NAN", "2017-11-01 01:23:45+0900"), ) @pytest.mark.parametrize( ["max_workers", "headers", "value"], [ [1, ["i", "f", "s", "if", "mix", "bool", "inf", "nan", "time"], TEST_DATA_MATRIX], [4, ["i", "f", "s", "if", "mix", "bool", "inf", "nan", "time"], TEST_DATA_MATRIX], [None, None, TEST_DATA_MATRIX], [None, [], TEST_DATA_MATRIX], [ None, ("i", "f", "s", "if", "mix", "bool", "inf", "nan", "time"), TEST_DATA_MATRIX_TUPLE, ], ], ) def test_normal_default(self, dp_extractor, max_workers, headers, value): dp_extractor.max_workers = max_workers dp_extractor.headers = headers col_dp_list = dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(value)) assert len(col_dp_list) == 9 col_idx = 0 dp = col_dp_list[col_idx] assert dp.column_index == col_idx assert dp.typecode == Typecode.INTEGER assert dp.align.align_code == Align.RIGHT.align_code assert dp.align.align_string == Align.RIGHT.align_string assert dp.ascii_char_width == 1 assert dp.decimal_places == 0 assert dp.format_str == "{:d}" assert str(dp) == ( "column=0, type=INTEGER, align=right, " "ascii_width=1, bit_len=2, int_digits=1, decimal_places=0" ) col_idx += 1 dp = col_dp_list[col_idx] assert dp.column_index == col_idx assert dp.typecode == Typecode.REAL_NUMBER assert dp.align.align_code == Align.RIGHT.align_code assert dp.align.align_string == Align.RIGHT.align_string assert dp.ascii_char_width == 4 assert dp.decimal_places == 2 assert dp.format_str == "{:.2f}" col_idx += 1 dp = col_dp_list[col_idx] assert dp.column_index == col_idx assert dp.typecode == Typecode.STRING assert dp.align.align_code == Align.LEFT.align_code assert dp.align.align_string == Align.LEFT.align_string assert dp.ascii_char_width == 4 assert dp.decimal_places is None assert dp.format_str == "{:s}" col_idx += 1 dp = col_dp_list[col_idx] assert dp.column_index == col_idx assert dp.typecode == Typecode.REAL_NUMBER assert dp.align.align_code == Align.RIGHT.align_code assert dp.align.align_string == Align.RIGHT.align_string assert dp.ascii_char_width == 4 assert dp.decimal_places == 1 assert dp.format_str == "{:.1f}" col_idx += 1 dp = col_dp_list[col_idx] assert dp.column_index == col_idx assert dp.typecode == Typecode.STRING assert dp.align.align_code == Align.LEFT.align_code assert dp.align.align_string == Align.LEFT.align_string assert dp.ascii_char_width == 3 assert dp.decimal_places == 1 assert dp.format_str == "{:s}" col_idx += 1 dp = col_dp_list[col_idx] assert dp.column_index == col_idx assert dp.typecode == Typecode.BOOL assert dp.align.align_code == Align.LEFT.align_code assert dp.align.align_string == Align.LEFT.align_string assert dp.ascii_char_width == 5 assert dp.decimal_places is None assert dp.format_str == "{}" col_idx += 1 dp = col_dp_list[col_idx] assert dp.column_index == col_idx assert dp.typecode == Typecode.INFINITY assert dp.align.align_code == Align.LEFT.align_code assert dp.align.align_string == Align.LEFT.align_string assert dp.ascii_char_width == 8 assert dp.decimal_places is None assert dp.format_str == "{:f}" col_idx += 1 dp = col_dp_list[col_idx] assert 
dp.column_index == col_idx assert dp.typecode == Typecode.NAN assert dp.align.align_code == Align.LEFT.align_code assert dp.align.align_string == Align.LEFT.align_string assert dp.ascii_char_width == 3 assert dp.decimal_places is None assert dp.format_str == "{:f}" col_idx += 1 dp = col_dp_list[col_idx] assert dp.column_index == col_idx assert dp.typecode == Typecode.STRING assert dp.align.align_code == Align.LEFT.align_code assert dp.align.align_string == Align.LEFT.align_string assert dp.ascii_char_width == 24 assert dp.decimal_places is None assert dp.format_str == "{:s}" @pytest.mark.parametrize( ["headers", "value"], [ [ ["i", "f"], [ [1234, 1234.5], [1234567, 34.5], ], ], [ [], [ [1234, 1234.5], [1234567, 34.5], ], ], ], ) def test_normal_format_str(self, dp_extractor, headers, value): dp_extractor.format_flags_list = [Format.THOUSAND_SEPARATOR, Format.THOUSAND_SEPARATOR] dp_extractor.max_workers = 1 dp_extractor.headers = headers col_dp_list = dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(value)) assert len(col_dp_list) == 2 col_idx = 0 dp = col_dp_list[col_idx] assert dp.column_index == col_idx assert dp.typecode == Typecode.INTEGER assert dp.format_str == "{:,d}" assert dp.ascii_char_width == 9 col_idx += 1 dp = col_dp_list[col_idx] assert dp.column_index == col_idx assert dp.typecode == Typecode.REAL_NUMBER assert dp.format_str == "{:,.1f}" assert dp.ascii_char_width == 7 @pytest.mark.parametrize( ["headers", "value"], [ [["i", "f", "s", "if", "mix", "bool", "inf", "nan", "time"], TEST_DATA_MATRIX], [None, TEST_DATA_MATRIX], [[], TEST_DATA_MATRIX], ], ) def test_normal_not_strict(self, dp_extractor, headers, value): dp_extractor.headers = headers col_dp_list = dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(value)) assert len(col_dp_list) == 9 dp = col_dp_list[0] assert dp.typecode == Typecode.INTEGER assert dp.align.align_code == Align.RIGHT.align_code assert dp.align.align_string == Align.RIGHT.align_string assert dp.ascii_char_width == 1 assert dp.decimal_places == 0 assert dp.format_str == "{:d}" dp = col_dp_list[1] assert dp.typecode == Typecode.REAL_NUMBER assert dp.align.align_code == Align.RIGHT.align_code assert dp.align.align_string == Align.RIGHT.align_string assert dp.ascii_char_width == 4 assert dp.decimal_places == 2 assert dp.format_str == "{:.2f}" def test_normal_column_type_hints(self, dp_extractor): data_matrix = [ [1, "1.1", 1, "2017-01-02 03:04:05"], [2, "2.2", 0.1, "2017-01-02 03:04:05"], ] dp_extractor.headers = ["none", "to_float", "to_str", "to_datetime"] dp_extractor.column_type_hints = [None, RealNumber, String, DateTime] assert dp_extractor.column_type_hints == [None, RealNumber, String, DateTime] col_dp_list = dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(data_matrix)) assert len(col_dp_list) == 4 assert col_dp_list[0].typecode == Typecode.INTEGER assert col_dp_list[1].typecode == Typecode.REAL_NUMBER assert col_dp_list[2].typecode == Typecode.STRING assert col_dp_list[3].typecode == Typecode.DATETIME dp_extractor.column_type_hints = ["", "float", "str", "datetime"] assert dp_extractor.column_type_hints == [None, RealNumber, String, DateTime] col_dp_list = dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(data_matrix)) assert len(col_dp_list) == 4 assert col_dp_list[0].typecode == Typecode.INTEGER assert col_dp_list[1].typecode == Typecode.REAL_NUMBER assert col_dp_list[2].typecode == Typecode.STRING assert col_dp_list[3].typecode == Typecode.DATETIME def test_normal_max_precision(self): extractor = 
DataPropertyExtractor(max_precision=3) extractor.headers = ["i", "f"] value = [ [1234, 0.0000000001], [1234567, 34.5], ] col_dp_list = extractor.to_column_dp_list(extractor.to_dp_matrix(value)) assert len(col_dp_list) == 2 col_idx = 0 dp = col_dp_list[col_idx] assert dp.column_index == col_idx assert dp.typecode == Typecode.INTEGER assert dp.decimal_places == 0 col_idx += 1 dp = col_dp_list[col_idx] assert dp.column_index == col_idx assert dp.typecode == Typecode.REAL_NUMBER assert dp.decimal_places == 3 # test setter extractor.max_precision = 1 col_dp_list = extractor.to_column_dp_list(extractor.to_dp_matrix(value)) assert len(col_dp_list) == 2 col_idx = 0 dp = col_dp_list[col_idx] assert dp.column_index == col_idx assert dp.typecode == Typecode.INTEGER assert dp.decimal_places == 0 col_idx += 1 dp = col_dp_list[col_idx] assert dp.column_index == col_idx assert dp.typecode == Typecode.REAL_NUMBER assert dp.decimal_places == 1 def test_normal_nan_inf(self, dp_extractor): dp_extractor.headers = ["n", "i"] col_dp_list = dp_extractor.to_column_dp_list( dp_extractor.to_dp_matrix([[nan, inf], ["nan", "inf"]]) ) assert len(col_dp_list) == 2 dp = col_dp_list[0] assert dp.typecode == Typecode.NAN assert dp.align.align_code == Align.LEFT.align_code assert dp.align.align_string == Align.LEFT.align_string assert dp.ascii_char_width == 3 assert dp.decimal_places is None dp = col_dp_list[1] assert dp.typecode == Typecode.INFINITY assert dp.align.align_code == Align.LEFT.align_code assert dp.align.align_string == Align.LEFT.align_string assert dp.ascii_char_width == 8 assert dp.decimal_places is None @pytest.mark.parametrize(["ambiguous_width"], [[2], [1]]) def test_normal_east_asian_ambiguous_width(self, dp_extractor, ambiguous_width): dp_extractor.headers = ["ascii", "eaa"] dp_extractor.east_asian_ambiguous_width = ambiguous_width col_dp_list = dp_extractor.to_column_dp_list( dp_extractor.to_dp_matrix([["abcdefg", "Øαββ"], ["abcdefghij", "ØØ"]]) ) assert len(col_dp_list) == 2 dp = col_dp_list[0] assert dp.typecode == Typecode.STRING assert dp.align.align_code == Align.LEFT.align_code assert dp.align.align_string == Align.LEFT.align_string assert dp.ascii_char_width == 10 assert dp.decimal_places is None dp = col_dp_list[1] assert dp.typecode == Typecode.STRING assert dp.align.align_code == Align.LEFT.align_code assert dp.align.align_string == Align.LEFT.align_string assert dp.ascii_char_width == 4 * ambiguous_width assert dp.decimal_places is None def test_normal_empty_value(self, dp_extractor): dp_extractor.headers = ["a", "22", "cccc"] col_dp_list = dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(None)) dp = col_dp_list[0] assert dp.typecode == Typecode.NONE assert dp.align.align_code == Align.LEFT.align_code assert dp.align.align_string == Align.LEFT.align_string assert dp.ascii_char_width == 1 assert dp.decimal_places is None assert dp.format_str == "{}" dp = col_dp_list[1] assert dp.typecode == Typecode.NONE assert dp.align.align_code == Align.LEFT.align_code assert dp.align.align_string == Align.LEFT.align_string assert dp.ascii_char_width == 2 assert dp.decimal_places is None assert dp.format_str == "{}" dp = col_dp_list[2] assert dp.typecode == Typecode.NONE assert dp.align.align_code == Align.LEFT.align_code assert dp.align.align_string == Align.LEFT.align_string assert dp.ascii_char_width == 4 assert dp.decimal_places is None assert dp.format_str == "{}" class Test_DataPropertyExtractor_matrix_formatting: TEST_DATA_MATRIX_NORMAL_COL3 = [["a", 0, "aa"], ["b", 1, "bb"], ["c", 2, 
"ccc"]] TEST_DATA_MATRIX_NOUNIFORM_COL1 = [["a", 0], ["b", 1, "bb"], ["c", 2, "ccc", 0.1], ["d"]] @pytest.mark.parametrize( ["headers", "value", "matrix_formatting", "expected"], [ [None, TEST_DATA_MATRIX_NOUNIFORM_COL1, MatrixFormatting.TRIM, 1], [["a", "b"], TEST_DATA_MATRIX_NORMAL_COL3, MatrixFormatting.TRIM, 2], [None, TEST_DATA_MATRIX_NOUNIFORM_COL1, MatrixFormatting.FILL_NONE, 4], [["a", "b", "c"], TEST_DATA_MATRIX_NORMAL_COL3, MatrixFormatting.FILL_NONE, 3], [["a", "b", "c"], TEST_DATA_MATRIX_NOUNIFORM_COL1, MatrixFormatting.HEADER_ALIGNED, 3], [ ["a", "b", "c", "d", "e"], TEST_DATA_MATRIX_NOUNIFORM_COL1, MatrixFormatting.HEADER_ALIGNED, 5, ], ], ) def test_normal_matrix_formatting( self, dp_extractor, headers, value, matrix_formatting, expected ): dp_extractor.headers = headers dp_extractor.matrix_formatting = matrix_formatting col_dp_list = dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(value)) assert len(col_dp_list) == expected @pytest.mark.parametrize( ["headers", "value", "matrix_formatting", "expected"], [ [ ["i", "f", "s", "if", "mix"], TEST_DATA_MATRIX_NOUNIFORM_COL1, MatrixFormatting.EXCEPTION, ValueError, ] ], ) def test_exception_matrix_formatting( self, dp_extractor, headers, value, matrix_formatting, expected ): dp_extractor.headers = headers dp_extractor.matrix_formatting = matrix_formatting with pytest.raises(expected): dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(value)) class Test_DataPropertyExtractor_update_preprocessor: def test_normal(self, dp_extractor): assert dp_extractor.preprocessor.strip_str is None assert dp_extractor.preprocessor.replace_tabs_with_spaces is True assert dp_extractor.preprocessor.tab_length == 2 assert dp_extractor.preprocessor.line_break_handling is LineBreakHandling.NOP assert dp_extractor.preprocessor.line_break_repl == " " assert dp_extractor.preprocessor.is_escape_html_tag is False assert dp_extractor.preprocessor.is_escape_formula_injection is False dp_extractor.update_preprocessor( strip_str='"', replace_tabs_with_spaces=False, tab_length=4, line_break_handling=LineBreakHandling.REPLACE, line_break_repl="<br>", is_escape_html_tag=True, is_escape_formula_injection=True, ) assert dp_extractor.preprocessor.strip_str == '"' assert dp_extractor.preprocessor.replace_tabs_with_spaces is False assert dp_extractor.preprocessor.tab_length == 4 assert dp_extractor.preprocessor.line_break_handling is LineBreakHandling.REPLACE assert dp_extractor.preprocessor.line_break_repl == "<br>" assert dp_extractor.preprocessor.is_escape_html_tag is True assert dp_extractor.preprocessor.is_escape_formula_injection is True
1, Decimal("1.1")], float], [[0.1, Decimal("1.1")], Decimal]] ) def test_normal_float(self, dp_extractor, value, float_type): dp_extractor.float_type = float_type dp_list = dp_extractor.to_dp_list(value) for dp in dp_list: assert isinstance(dp.data, float_type) @pytest.mark.parametrize( ["value", "type_hint", "expected_list"], [ [ ["2017-01-02 03:04:05", datetime.datetime(2017, 1, 2, 3, 4, 5)], None, [Typecode.STRING, Typecode.DATETIME], ], [ ["2017-01-02 03:04:05", datetime.datetime(2017, 1, 2, 3, 4, 5)], DateTime, [Typecode.DATETIME, Typecode.DATETIME], ], ], ) def test_normal_type_hint(self, dp_extractor, value, type_hint, expected_list): dp_extractor.default_type_hint = type_hint dp_list = dp_extractor.to_dp_list(value) for dp, expected in zip(dp_list, expected_list): assert dp.typecode == expected @pytest.mark.parametrize( ["value", "strip_str_header", "strip_str_value", "expected"], [ [['"1"', '"-1.1"', '"abc"'], "", '"', [1, Decimal("-1.1"), "abc"]], [['"1"', '"-1.1"', '"abc"'], '"', "", ['"1"', '"-1.1"', '"abc"']], [['"1"', '"-1.1"', '"abc"'], None, None, ['"1"', '"-1.1"', '"abc"']], ], ) def test_normal_strip_str( self, dp_extractor, value, strip_str_header, strip_str_value, expected ): dp_extractor.strip_str_header = strip_str_header dp_extractor.preprocessor = Preprocessor(strip_str=strip_str_value) dp_list = dp_extractor.to_dp_list(value) for dp, expected_value in zip(dp_list, expected): assert dp.data == expected_value dp_matrix = dp_extractor.to_dp_matrix([value]) for dp, expected_value in zip(dp_matrix[0], expected): assert dp.data == expected_value @pytest.mark.parametrize( ["value", "line_break_handling", "expected"], [ [["a\nb", "a\r\nb"], LineBreakHandling.NOP, ["a\nb", "a\r\nb"]], [["a\nb", "a\r\nb"], LineBreakHandling.REPLACE, ["a b", "a b"]], [["a\nb", "a\r\nb"], LineBreakHandling.ESCAPE, ["a\\nb", "a\\r\\nb"]], ], ) def test_normal_line_break_handling(self, dp_extractor, value, line_break_handling, expected): dp_extractor.preprocessor = Preprocessor(line_break_handling=line_break_handling) dp_list = dp_extractor.to_dp_list(value) for dp, value in zip(dp_list, expected): assert dp.data == value @pytest.mark.parametrize( ["value", "line_break_handling", "line_break_repl", "expected"], [ [["a\nb", "a\r\nb"], LineBreakHandling.NOP, "<br>", ["a\nb", "a\r\nb"]], [ ["a\nb", "a\r\nb", "a\r\n\nb"], LineBreakHandling.REPLACE, "<br>", ["a<br>b", "a<br>b", "a<br><br>b"], ], ], ) def test_normal_line_break_repl( self, dp_extractor, value, line_break_handling, line_break_repl, expected ): dp_extractor.preprocessor = Preprocessor( line_break_handling=line_break_handling, line_break_repl=line_break_repl ) dp_list = dp_extractor.to_dp_list(value) for dp, value in zip(dp_list, expected): assert dp.data == value, value @pytest.mark.parametrize( ["value", "escape_formula_injection", "expected"], [ [ ["a+b", "=a+b", "-a+b", "+a+b", "@a+b"], True, ["a+b", "'=a+b", "'-a+b", "'+a+b", "'@a+b"], ], [ ["a+b", "=a+b", "-a+b", "+a+b", "@a+b"], False, ["a+b", "=a+b", "-a+b", "+a+b", "@a+b"], ], ], ) def test_normal_escape_formula_injection( self, dp_extractor, value, escape_formula_injection, expected ): dp_extractor.preprocessor = Preprocessor( is_escape_formula_injection=escape_formula_injection ) dp_list = dp_extractor.to_dp_list(value) for dp, value in zip(dp_list, expected): assert dp.data == value, value @pytest.mark.parametrize( ["value", "expected"], [[[0, None], [0, None]]], ) def test_exception_escape_formula_injection(self, dp_extractor, value, expected): dp_extractor.preprocessor = 
Preprocessor(is_escape_formula_injection=True) dp_list = dp_extractor.to_dp_list(value) for dp, value in zip(dp_list, expected): assert dp.data == value, value class Test_DataPropertyExtractor_to_column_dp_list: TEST_
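The tests above drive DataPropertyExtractor almost entirely through to_dp_matrix() and to_column_dp_list(). A minimal sketch of that flow, using made-up header names and values purely for illustration:

from dataproperty import DataPropertyExtractor

extractor = DataPropertyExtractor()
extractor.headers = ["id", "price"]  # hypothetical column names
matrix = [[1, "12.5"], [2, "3.75"]]

# Per-cell properties: data, typecode, alignment, decimal places, format string.
dp_matrix = extractor.to_dp_matrix(matrix)

# Per-column properties aggregated over every row of the matrix.
for col_dp in extractor.to_column_dp_list(dp_matrix):
    print(col_dp.column_index, col_dp.typecode, col_dp.format_str)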
config.py
SQLALCHEMY_DATABASE_URI = "postgresql:///test_freight" LOG_LEVEL = "INFO" WORKSPACE_ROOT = "/tmp/freight-tests"
SSH_PRIVATE_KEY = "-----BEGIN RSA PRIVATE KEY-----\nMIIEowIBAAKCAQEArvyc+vZVxUjC5ZcFg1VN3jQOCOjO94gwQKFxlz0zOCrCz+Sq\nnWk28YdUpOU016Zinlh4ZZk2136nCKKTMnNMjd6cTTCn5fWomjR+F2CSdaYYpYfO\nNtVnq0SIDUgGmjyPncOGrxVT6EzjjSvgE8W8YIc5rVJqNMAH5OywUH0nqISYN2yP\nwbUPVf8zqu3kpnTt7YcWZ+Ye4b3jX6Fo2Xw5P1TTwQ92K9JdVAltBRpwSLtBQUYC\nMkwtNf6QIbRYKoVZuEhi/8XCxT0zG78Lsqpbld8IEnLWUGifCtx9mKqVi8Y3QTsT\nknMWFaf+Su8htgw/W7tufmrtTKNJYDtPTGiBeQIDAQABAoIBABYsC/gAnn2Q6qEM\nsbYiaOtuzRhz50WWDAckbbAsIQFM6cJNxxCK9FtGOoNqR3fLrVNDAn5dG4XSlneR\nofUShvCy9DsTnzKUHfjsDc4IfoZJtXXD720jPS+GT3bfWXbRlaD31Wj52tfkZjDN\nDmdy9puEhtpfRvXIHzfyhaStNwkzDh0jp8e8yok1mLA+3FPqkJPF6ptxPs6HEQS8\npY75jxvypbux2+W9249J/HqMmd5/+r7tt62vciqnXb2LG2AmUxLhTAQU9mGM2OSL\nrh2j+7/2apEQLdJ0DbS19IkQZRpO/DLPyhg6C29ZuNQffQWoLiZlfgIEaBT939aM\nkFdzy8ECgYEA4BdisLRCyCdm2M7fMDsV7j71z48Q1Kdl5A6/ngiK1dCwnjRMvkLx\nKOHtmvpJxHTH+JAewrrGUg0GF1YpM3gi0FQ7f9qTlAeFIrU3udV8F/m6+rIOpx92\nB2FSrYTaonLX8g4OzXKNtQcwzx91mFWTIEmfQl9let0WMrCRzReXp0sCgYEAx+dC\ncbERCVcJvs9+SUwVXXOreCF4PedLrg7bjkfYSpmAJk9c36EOi1jIGO5rat5/k7Nb\n0plWghADjtcb4r8oO6pzhMR81cESgFOk1UasP4rPYX4mEYPBwVGgN7ECUXj9XFPZ\n/tk7lgneBc1/6eV978MTprXiHU5Rv7yZBMuf68sCgYAd6YE27Rjs9rV3w0VvfrOS\ntbzCE+q/OAkVxBI32hQOLmkk9P45d14RgvbgdQBbxOrcdwBkJeJLGYnym4GsaSDc\nhiHbEyYX4FkZJO9nUuPZn3Ah/pqOHFj46zjKCK3WeVXx7YZ0ThI0U91kCGL+Do4x\nBSLJDUrSd6h6467SnY+UuQKBgGV0/AYT5h+lay7KxL+Su+04Pbi01AAnGgP3SnuF\n/0KtcZsAAJUHewhCQRxWNXKCBqICEAJtDLjqQ8QFbQPCHTtbIVIrH2ilmyxCR5Bv\nVBDT9Lj4e328L2Rcd0KMti5/h6eKb0OnIVTfIS40xE0Dys0bZyfffCl/jIIRyF/k\nsP/NAoGBAIfxtr881cDFrxahrTJ3AtGXxjJjMUW/S6+gKd7Lj9i+Uadb9vjD8Wt8\ngWrUDwXVAhD5Sxv+OCBizPF1CxXTgC3+/ophkUcy5VTcBchgQI7JrItujxUc0EvR\nCwA7/JPyO8DaUtvpodUKO27vr11G/NmXYrOohCP6VxH/Y6p5L9o4\n-----END RSA PRIVATE KEY-----" GITHUB_TOKEN = "a" * 40
units.rs
use std::str::FromStr; use crate::Error; #[derive(Debug, Eq, PartialEq, Copy, Clone)] pub enum Units { Human, Si, } impl Units { // Why do you even need this, when clap provides the `arg_enum!` macro? // I just do not like that the results are capitalized. // Who the hell wants to manually write arguments like `-u Human`? // `-u human` is much prettier. pub fn arg_variants() -> [&'static str; 2] { ["human", "si"] } } impl FromStr for Units { type Err = Error; fn from_str(s: &str) -> Result<Self, Self::Err> { match () { _ if s.eq_ignore_ascii_case("human") => Ok(Units::Human),
} }
_ if s.eq_ignore_ascii_case("si") => Ok(Units::Si), _ => Err(Error::ParseError), }
driver_test.go
package database_test import ( "fmt" "github.com/parallelcointeam/pod/database" _ "github.com/parallelcointeam/pod/database/ffldb" "testing" ) var ( // ignoreDbTypes are types which should be ignored when running tests that iterate all supported DB types. This allows some tests to add bogus drivers for testing purposes while still allowing other tests to easily iterate all supported drivers. ignoreDbTypes = map[string]bool{"createopenfail": true} ) // checkDbError ensures the passed error is a database.Error with an error code that matches the passed error code. func checkDbError(t *testing.T, testName string, gotErr error, wantErrCode database.ErrorCode) bool { dbErr, ok := gotErr.(database.Error) if !ok { t.Errorf("%s: unexpected error type - got %T, want %T", testName, gotErr, database.Error{}) return false } if dbErr.ErrorCode != wantErrCode { t.Errorf("%s: unexpected error code - got %s (%s), want %s", testName, dbErr.ErrorCode, dbErr.Description, wantErrCode) return false } return true } // TestAddDuplicateDriver ensures that adding a duplicate driver does not overwrite an existing one. func TestAddDuplicateDriver(t *testing.T) { supportedDrivers := database.SupportedDrivers() if len(supportedDrivers) == 0 { t.Errorf("no backends to test") return } dbType := supportedDrivers[0] // bogusCreateDB is a function which acts as a bogus create and open driver function and intentionally returns a failure that can be detected if the interface allows a duplicate driver to overwrite an // existing one. bogusCreateDB := func(args ...interface{}) (database.DB, error) { return nil, fmt.Errorf("duplicate driver allowed for database "+ "type [%v]", dbType) } // Create a driver that tries to replace an existing one. Set its create and open functions to a function that causes a test failure if they are invoked. driver := database.Driver{ DbType: dbType, Create: bogusCreateDB, Open: bogusCreateDB, } testName := "duplicate driver registration" err := database.RegisterDriver(driver) if !checkDbError(t, testName, err, database.ErrDbTypeRegistered) { return } } // TestCreateOpenFail ensures that errors which occur while opening or closing a database are handled properly. func TestCreateOpenFail(t *testing.T) { // bogusCreateDB is a function which acts as a bogus create and open driver function that intentionally returns a failure which can be detected. dbType := "createopenfail" openError := fmt.Errorf("failed to create or open database for "+ "database type [%v]", dbType) bogusCreateDB := func(args ...interface{}) (database.DB, error) { return nil, openError } // Create and add driver that intentionally fails when created or opened to ensure errors on database open and create are handled properly. driver := database.Driver{ DbType: dbType, Create: bogusCreateDB, Open: bogusCreateDB, } database.RegisterDriver(driver) // Ensure creating a database with the new type fails with the expected error. _, err := database.Create(dbType) if err != openError { t.Errorf("expected error not received - got: %v, want %v", err, openError) return } // Ensure opening a database with the new type fails with the expected error. _, err = database.Open(dbType) if err != openError { t.Errorf("expected error not received - got: %v, want %v", err, openError) return } } // TestCreateOpenUnsupported ensures that attempting to create or open an unsupported database type is handled properly. func TestCreateOpenUnsupported(t *testing.T)
{ // Ensure creating a database with an unsupported type fails with the expected error. testName := "create with unsupported database type" dbType := "unsupported" _, err := database.Create(dbType) if !checkDbError(t, testName, err, database.ErrDbUnknownType) { return } // Ensure opening a database with an unsupported type fails with the expected error. testName = "open with unsupported database type" _, err = database.Open(dbType) if !checkDbError(t, testName, err, database.ErrDbUnknownType) { return } }
util.py
from __future__ import absolute_import from __future__ import print_function import math import veriloggen.core.vtypes as vtypes import veriloggen.seq.seq as seq t_Input = 'Input' t_Output = 'Output' t_Wire = 'Wire' t_Reg = 'Reg' t_OutputReg = 'OutputReg' def swap_type(cls): return cls._O, cls._I def make_port(m, _type, *args, **kwargs): if 'no_reg' in kwargs and kwargs['no_reg']: _type = _type.replace('Reg', '') if len(_type) == 0: _type = 'Wire' if 'no_reg' in kwargs: del kwargs['no_reg'] if 'initval' in kwargs and 'Reg' not in _type: del kwargs['initval'] return getattr(m, _type)(*args, **kwargs) def connect_port(left, right):
def log2(value, maxsize=32): if isinstance(value, (int, bool, float)): return int(math.ceil(math.log(value, 2))) patterns = [] for i in range(1, maxsize): patterns.append((value < 2 ** i, i)) return vtypes.PatternMux(patterns) def add_mux(targ, cond, value): prev_assign = targ._get_assign() if not prev_assign: targ.assign(vtypes.Mux(cond, value, vtypes.IntX())) else: prev_value = prev_assign.statement.right prev_assign.overwrite_right( vtypes.Mux(cond, value, prev_value)) targ.module.remove(prev_assign) targ.module.append(prev_assign) def add_enable_cond(targ, cond, value): prev_assign = targ._get_assign() if not prev_assign: targ.assign(vtypes.Mux(cond, value, 0)) else: prev_value = prev_assign.statement.right prev_assign.overwrite_right( vtypes.Mux(cond, value, prev_value)) targ.module.remove(prev_assign) targ.module.append(prev_assign) def add_disable_cond(targ, cond, value): prev_assign = targ._get_assign() if not prev_assign: targ.assign(vtypes.Mux(cond, value, 1)) else: prev_value = prev_assign.statement.right prev_assign.overwrite_right( vtypes.Ands(vtypes.Mux(cond, value, 1), prev_value)) targ.module.remove(prev_assign) targ.module.append(prev_assign)
if isinstance(left, vtypes.Reg): wire_left = left.module.TmpWireLike(left) wire_left.assign(right) left.module.Always()(left(wire_left, blk=True)) else: left.assign(right)
test.rs
// Copyright (c) The Diem Core Contributors // SPDX-License-Identifier: Apache-2.0 use super::*; use proptest::prelude::*; use schemadb::{schema::fuzzing::assert_encode_decode, test_no_panic_decoding}; proptest! { #[test] fn test_encode_decode(pos in any::<u64>(), hash in any::<HashValue>()) {
&(pos, Position::from_inorder_index(pos)), &hash, ); } } test_no_panic_decoding!(EventAccumulatorSchema);
assert_encode_decode::<EventAccumulatorSchema>(
test_summary.py
import pytest from ga4gh.testbed.report.summary import Summary increment_inputs = "count_type," \ + "use_n," \ + "n," increment_cases = [ ("unknown", False, 1), ("unknown", True, 3), ("passed", False, 1), ("passed", True, 4), ("warned", False, 1), ("warned", True, 5), ("failed", False, 1), ("failed", True, 6), ("skipped", False, 1), ("skipped", True, 7) ] summary_total_inputs = "unknown,passed,warned,failed,skipped,total" summary_total_cases = [ (1, 1, 1, 1, 1, 5), (10, 4, 6, 7, 12, 39) ] aggregate_summary_inputs = "counts_a,counts_b,counts_exp" aggregate_summary_cases = [ ( [1, 3, 5, 7, 9], [2, 4, 6, 8, 10], [3, 7, 11, 15, 19] ), ( [15, 9, 6, 12, 13], [42, 47, 31, 27, 26], [57, 56, 37, 39, 39] ) ] @pytest.mark.parametrize(increment_inputs, increment_cases) def test_summary_increment(count_type, use_n, n): summary = Summary() increment_fn_name = "increment_" + count_type getter_fn_name = "get_" + count_type increment_fn = getattr(summary, increment_fn_name) getter_fn = getattr(summary, getter_fn_name) if use_n: increment_fn(n=n) else: increment_fn() assert getter_fn() == n @pytest.mark.parametrize(summary_total_inputs, summary_total_cases) def test_summary_get_total(unknown, passed, warned, failed, skipped, total): summary = Summary() summary.increment_unknown(n=unknown) summary.increment_passed(n=passed) summary.increment_warned(n=warned) summary.increment_failed(n=failed) summary.increment_skipped(n=skipped) assert summary.get_total() == total @pytest.mark.parametrize(aggregate_summary_inputs, aggregate_summary_cases) def test_aggregate_summary(counts_a, counts_b, counts_exp): def prep_summary(summary, counts): summary.increment_unknown(n=counts[0]) summary.increment_passed(n=counts[1]) summary.increment_warned(n=counts[2]) summary.increment_failed(n=counts[3]) summary.increment_skipped(n=counts[4]) def
(summary, counts): assert summary.get_unknown() == counts[0] assert summary.get_passed() == counts[1] assert summary.get_warned() == counts[2] assert summary.get_failed() == counts[3] assert summary.get_skipped() == counts[4] summary_a = Summary() summary_b = Summary() prep_summary(summary_a, counts_a) prep_summary(summary_b, counts_b) summary_a.aggregate_summary(summary_b) assert_summary(summary_a, counts_exp)
assert_summary
app.js
var createError = require('http-errors'); var express = require('express'); var path = require('path'); var cookieParser = require('cookie-parser'); var logger = require('morgan'); var indexRouter = require('./routes/index'); var usersRouter = require('./routes/users'); var postsRouter = require('./routes/posts'); var app = express(); // view engine setup app.set('views', path.join(__dirname, 'views')); app.set('view engine', 'ejs'); app.use(logger('dev')); app.use(express.json()); app.use(express.urlencoded({ extended: false })); app.use(cookieParser());
app.use('/', indexRouter); app.use('/users', usersRouter); app.use('/posts', postsRouter); // catch 404 and forward to error handler app.use(function(req, res, next) { next(createError(404)); }); // error handler app.use(function(err, req, res, next) { // set locals, only providing error in development res.locals.message = err.message; res.locals.error = req.app.get('env') === 'development' ? err : {}; // render the error page res.status(err.status || 500); res.render('error'); }); module.exports = app;
app.use(express.static(path.join(__dirname, 'public')));
samplerate_retry.py
# Colleen Josephson, 2013 # This file attempts to implement the SampleRate bit rate selection algorithm # as outlined in the JBicket MS Thesis. from __future__ import division from random import choice from rates import ieee80211_to_idx import rates npkts = 0 #number of packets sent over link nsuccess = 0 #number of packets sent successfully NBYTES = 1500 #constant currRate = 54 #current best bitRate NRETRIES = 1 # The average back-off period, in microseconds, for up to 8 attempts of a 802.11b unicast packet. # TODO: find g data backoff = {0:0, 1:155, 2:315, 3:635, 4:1275, 5:2555, 6:5115, 7:5115, 8:5115, 9:5115, 10:5115, 11:5115, 12:5115, 13:5115, 14:5115, 15:5115, 16:5115, 17:5115, 18:5115, 19:5115, 20:5115} def bitrate_type(bitrate): return rates.RATES[ieee80211_to_idx(bitrate)].phy #"To calculate the transmission time of a n-byte unicast packet given the bit-rate b and # number of retries r, SampleRate uses the following equation based on the 802.11 unicast # retransmission mechanism detailed in Section 2.2" # # tx_time(b, r, n) = difs + backoff[r] + (r + 1)*(sifs + ack + header + (n * 8/b)) def tx_time(bitrate, retries, nbytes): # bitrate in MBPS, since 1*10^6 bps / 10-6 seconds/microseconds = 1 bit per microsecond global currRate, npkts, nsuccess, NBYTES brtype = bitrate_type(bitrate) if bitrate == 1: difs = 50 sifs = 10 ack = 304 header = 192 elif brtype == "ds" or brtype == "dsss": difs = 50 sifs = 10 ack = 304 header = 96 elif brtype == "ofdm": difs = 28 sifs = 9 ack = 304 # Somehow 6mb acks aren't used header = 20 else: raise ValueError("Unknown bitrate type", brtype, bitrate) return difs + backoff[retries] + (retries+1)*(sifs + ack + header + (nbytes * 8/(bitrate))) class Packet: def __init__(self, time_sent, success, txTime, rate): self.time_sent = time_sent self.success = success self.txTime = txTime self.rate = rate def __repr__(self): return ("Pkt sent at time %r, rate %r was successful: %r\n" % (self.time_sent, self.rate, self.success)) class Rate: def __init__(self, rate): self.rate = rate #in mbps self.success = 0 self.tries = 0 self.pktAcked = 0 self.succFails = 0 self.totalTX = 0 self.avgTX = float("inf") #pktsize/channelrate. pktsize = 1500 bytes self.losslessTX = tx_time(rate, 0, 1500) #microseconds self.window = [] #packets rcvd in last 10s def __repr__(self): return ("Bitrate %r mbps: \n" " tries: %r \n" " pktsAcked: %r \n" " succFails: %r \n" " totalTX: %r microseconds \n" " avgTx: %r microseconds \n" " losslessTX: %r microseconds" % (self.rate, self.tries, self.pktAcked, self.succFails, self.totalTX, self.avgTX, self.losslessTX)) # The modulation scheme used in 802.11g is orthogonal frequency-division multiplexing (OFDM) # copied from 802.11a with data rates of 6, 9, 12, 18, 24, 36, 48, and 54 Mbit/s, and reverts # to CCK (like the 802.11b standard) for 5.5 and 11 Mbit/s and DBPSK/DQPSK+DSSS for 1 and 2 Mbit/s. # Even though 802.11g operates in the same frequency band as 802.11b, it can achieve higher # data rates because of its heritage to 802.11a. RATES = dict((r, Rate(r)) for r in [1, 2, 5.5, 6, 9, 11, 12, 18, 24, 36, 48, 54]) #multi-rate retry returns an array of (rate, ntries) for the next n packets def apply_rate(cur_time): global currRate, npkts, nsuccess, NBYTES, NRETRIES remove_stale_results(cur_time) #"Increment the number of packets sent over the link" npkts += 1 #"If no packets have been successfully acknowledged, return the # highest bit-rate that has not had 4 successive failures." 
if nsuccess == 0: rrates = [r[1] for r in sorted(RATES.items())] rrates.reverse() retry = [] for r in rrates: if r.succFails < 4: currRate = r.rate retry.append((ieee80211_to_idx(currRate), NRETRIES)) return retry # Every 10 packets, select a random non-failing bit rate w/ better avg tx #"If the number of packets sent over the link is a multiple of ten," if (nsuccess != 0) and (npkts%10 == 0): #"select a random bit-rate from the bit-rates" cavgTX = RATES[currRate].avgTX #" that have not failed four successive times and that #have a minimum packet transmission time lower than the #current bit-rate's average transmission time." eligible = [r for i, r in RATES.items() if r.losslessTX < cavgTX and r.succFails < 4] if len(eligible) > 0: sampleRate = choice(eligible).rate #select random rate from eligible return [(ieee80211_to_idx(sampleRate), NRETRIES)] #"Otherwise, send packet at the bit-rate that has the lowest avg transmission time" # Trusts that currRate is properly maintained to be lowest avgTX return [(ieee80211_to_idx(currRate), NRETRIES)] #"When process_feedback() runs, it updates information that tracks # the number of samples and recalculates the average transmission # time for the bit-rate and destination. process_feedback() performs # the following operations:" def process_feedback(status, timestamp, delay, tries): #status: true if packet was rcvd successfully #timestamp: time pkt was sent #delay: rtt for entire process (including multiple tries) in nanoseconds #tries: an array of (bitrate, nretries) global currRate, npkts, nsuccess, NBYTES (bitrate, nretries) = tries[0] nretries -= 1 bitrate = rates.RATES[bitrate].mbps #"Calculate the transmission time for the packet based on the # bit-rate and number of retries using Equation 5.1 below." tx = tx_time(bitrate, nretries, NBYTES) #"Look up the destination and add the transmission time to the # total transmission times for the bit-rate." br = RATES[bitrate] if not status: br.succFails += 1 #"If the packet failed, increment the number of successive # failures for the bit-rate. else: #"Otherwise reset it." br.succFails = 0 #"If the packet succeeded, increment the number of successful # packets sent at that bit-rate. br.success += 1 nsuccess += 1 #"Re-calculate the average transmission time for the bit-rate # based on the sum of transmission times and the number of # successful packets sent at that bit-rate." br.totalTX += tx if br.success == 0: br.avgTX = float("inf") else: br.avgTX = br.totalTX/br.success #"Set the current bit-rate for the destination to the one with the # minimum average transmission time." calculateMin() #"Append the current time, packet status, transmission time, and # bit-rate to the list of transmission results." p = Packet(timestamp, status, tx, bitrate) br.window.append(p) #"SampleRate's remove_stale_results() function removes results from # the transmission results queue that were obtained longer than ten # seconds ago." def remove_stale_results(cur_time): window_cutoff = cur_time - 1e10 #window size of 10s for r in RATES.values(): for p in r.window: #"For each stale transmission result, it does the following" if p.time_sent < window_cutoff: #"Remove the transmission time from the total transmission times # at that bit-rate to that destination." r.window.remove(p) r.totalTX -= p.txTime #"If the packet succeeded, decrement the number of # successful packets at that bit-rate to that # destination."
if p.success: r.success -= 1 #"After remove stale results() performs these operations for #each stale sample, it recalculates the minimum average #transmission times for each bit-rate and destination. if r.success == 0: r.avgTX = float("inf") else: r.avgTX = r.totalTX/r.success for r in RATES.values(): succFails = 0 maxSuccFails = 0 for p in r.window: if p.success: if succFails > maxSuccFails: maxSuccFails = succFails succFails = 0 else: succFails += 1 if succFails > maxSuccFails: maxSuccFails = succFails r.succFails = maxSuccFails #"remove_stale_results() then sets the current bit-rate for each # destination to the one with the smallest average trans- mission # time." calculateMin() def calculateMin(): global currRate, npkts, nsuccess, NBYTES #set current rate to the one w/ min avg tx time c = RATES[currRate] if c.succFails > 4: c.avgTX = float("inf") #c = rates[1] for i, r in sorted(RATES.items(), reverse=True): #print("------------------------------------------------")
                and r.succFails == 0 and r.losslessTX < c.avgTX:
            #print ("c = %r " % c)
            #print ("r = %r " %r)
            c = r
            break
        #print("------------------------------------------------")
        if c.avgTX > r.avgTX and r.succFails < 4:
            c = r
    currRate = c.rate
        #we've never tried this rate thoroughly before
        if r.rate < c.rate and r.avgTX == float("inf") \
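# A minimal worked example of the tx_time() equation above (a sketch, not part of
# the original module). The constants are copied from the OFDM and 1 Mbps DSSS
# branches of tx_time(), and backoff[0] = 0 for a first transmission attempt.
OFDM_54 = dict(difs=28, sifs=9, ack=304, header=20)
DSSS_1 = dict(difs=50, sifs=10, ack=304, header=192)

def example_tx_time(params, bitrate_mbps, nbytes=1500, retries=0, backoff_us=0):
    # tx_time(b, r, n) = difs + backoff[r] + (r + 1)*(sifs + ack + header + n*8/b)
    per_attempt = params["sifs"] + params["ack"] + params["header"] + nbytes * 8 / bitrate_mbps
    return params["difs"] + backoff_us + (retries + 1) * per_attempt

# Lossless transmission time of a 1500-byte packet with no retries:
print(example_tx_time(OFDM_54, 54))  # ~583 microseconds at 54 Mbps OFDM
print(example_tx_time(DSSS_1, 1))    # 12556 microseconds at 1 Mbps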
config.py
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import annotations import configparser import getpass import itertools import os import re from abc import ABC, abstractmethod from dataclasses import dataclass from functools import partial from hashlib import sha1 from typing import Any, ClassVar, Dict, Iterable, List, Mapping, Sequence, Union, cast import toml from typing_extensions import Protocol from pants.base.build_environment import get_buildroot from pants.option.ranked_value import Value from pants.util.eval import parse_expression from pants.util.ordered_set import OrderedSet # A dict with optional override seed values for buildroot, pants_workdir, and pants_distdir. SeedValues = Dict[str, Value] class ConfigSource(Protocol): """A protocol that matches pants.engine.fs.FileContent. Also matches the ad-hoc FileContent-like class we use during options bootstrapping, where we cannot use pants.engine.fs.FileContent itself due to circular imports. """ @property def path(self) -> str: raise NotImplementedError() @property def content(self) -> bytes: raise NotImplementedError() class Config(ABC): """Encapsulates config file loading and access, including encapsulation of support for multiple config files. Supports variable substitution using old-style Python format strings. E.g., %(var_name)s will be replaced with the value of var_name. """ DEFAULT_SECTION: ClassVar[str] = configparser.DEFAULTSECT class ConfigError(Exception): pass class ConfigValidationError(ConfigError): pass @classmethod def load( cls, file_contents: Iterable[ConfigSource], *, seed_values: SeedValues | None = None, ) -> Config: """Loads config from the given string payloads, with later payloads overriding earlier ones. A handful of seed values will be set to act as if specified in the loaded config file's DEFAULT section, and be available for use in substitutions. The caller may override some of these seed values. """ single_file_configs = [] for file_content in file_contents: content_digest = sha1(file_content.content).hexdigest() normalized_seed_values = cls._determine_seed_values(seed_values=seed_values) try: config_values = cls._parse_toml( file_content.content.decode(), normalized_seed_values ) except Exception as e: raise cls.ConfigError( f"Config file {file_content.path} could not be parsed as TOML:\n {e}" ) single_file_configs.append( _SingleFileConfig( config_path=file_content.path, content_digest=content_digest, values=config_values, ), ) return _ChainedConfig(tuple(reversed(single_file_configs))) @classmethod def _parse_toml( cls, config_content: str, normalized_seed_values: dict[str, str] ) -> _ConfigValues: """Attempt to parse as TOML, raising an exception on failure.""" toml_values = cast(Dict[str, Any], toml.loads(config_content)) toml_values["DEFAULT"] = { **normalized_seed_values, **toml_values.get("DEFAULT", {}), } return _ConfigValues(toml_values) @staticmethod def _determine_seed_values(*, seed_values: SeedValues | None = None) -> dict[str, str]: """We pre-populate several default values to allow %([key-name])s interpolation. This sets up those defaults and checks if the user overrode any of the values. 
""" safe_seed_values = seed_values or {} buildroot = cast(str, safe_seed_values.get("buildroot", get_buildroot())) all_seed_values: dict[str, str] = { "buildroot": buildroot, "homedir": os.path.expanduser("~"), "user": getpass.getuser(), } def update_seed_values(key: str, *, default_dir: str) -> None: all_seed_values[key] = cast( str, safe_seed_values.get(key, os.path.join(buildroot, default_dir)) ) update_seed_values("pants_workdir", default_dir=".pants.d") update_seed_values("pants_distdir", default_dir="dist") return all_seed_values def get(self, section, option, type_=str, default=None): """Retrieves option from the specified section (or 'DEFAULT') and attempts to parse it as type. If the specified section does not exist or is missing a definition for the option, the value is looked up in the DEFAULT section. If there is still no definition found, the default value supplied is returned. """ if not self.has_option(section, option): return default raw_value = self.get_value(section, option) if issubclass(type_, str): return raw_value key = f"{section}.{option}" return parse_expression( name=key, val=raw_value, acceptable_types=type_, raise_type=self.ConfigError ) @abstractmethod
@abstractmethod def sources(self) -> list[str]: """Returns the sources of this config as a list of filenames.""" @abstractmethod def sections(self) -> list[str]: """Returns the sections in this config (not including DEFAULT).""" @abstractmethod def has_section(self, section: str) -> bool: """Returns whether this config has the section.""" @abstractmethod def has_option(self, section: str, option: str) -> bool: """Returns whether this config specified a value for the option.""" @abstractmethod def get_value(self, section: str, option: str) -> str | None: """Returns the value of the option in this config as a string, or None if no value specified.""" @abstractmethod def get_source_for_option(self, section: str, option: str) -> str | None: """Returns the path to the source file the given option was defined in. :param section: the scope of the option. :param option: the name of the option. :returns: the path to the config file, or None if the option was not defined by a config file. """ _TomlPrimitive = Union[bool, int, float, str] _TomlValue = Union[_TomlPrimitive, List[_TomlPrimitive]] @dataclass(frozen=True) class _ConfigValues: """The parsed contents of a TOML config file.""" values: dict[str, Any] @staticmethod def _is_an_option(option_value: _TomlValue | dict) -> bool: """Determine if the value is actually an option belonging to that section. This handles the special syntax of `my_list_option.add` and `my_list_option.remove`. """ if isinstance(option_value, dict): return "add" in option_value or "remove" in option_value return True def _possibly_interpolate_value( self, raw_value: str, *, option: str, section: str, section_values: dict, ) -> str: """For any values with %(foo)s, substitute it with the corresponding value from DEFAULT or the same section.""" def format_str(value: str) -> str: # Because dictionaries use the symbols `{}`, we must proactively escape the symbols so # that .format() does not try to improperly interpolate. escaped_str = value.replace("{", "{{").replace("}", "}}") new_style_format_str = re.sub( pattern=r"%\((?P<interpolated>[a-zA-Z_0-9]*)\)s", repl=r"{\g<interpolated>}", string=escaped_str, ) try: possible_interpolations = {**self.defaults, **section_values} return new_style_format_str.format(**possible_interpolations) except KeyError as e: bad_reference = e.args[0] raise configparser.InterpolationMissingOptionError( option, section, raw_value, bad_reference, ) def recursively_format_str(value: str) -> str: # It's possible to interpolate with a value that itself has an interpolation. We must # fully evaluate all expressions for parity with configparser. match = re.search(r"%\(([a-zA-Z_0-9]*)\)s", value) if not match: return value return recursively_format_str(value=format_str(value)) return recursively_format_str(raw_value) def _stringify_val( self, raw_value: _TomlValue, *, option: str, section: str, section_values: dict, interpolate: bool = True, list_prefix: str | None = None, ) -> str: """For parity with configparser, we convert all values back to strings, which allows us to avoid upstream changes to files like parser.py. This is clunky. If we drop INI support, we should remove this and use native values (although we must still support interpolation). 
""" possibly_interpolate = partial( self._possibly_interpolate_value, option=option, section=section, section_values=section_values, ) if isinstance(raw_value, str): return possibly_interpolate(raw_value) if interpolate else raw_value if isinstance(raw_value, list): def stringify_list_member(member: _TomlPrimitive) -> str: if not isinstance(member, str): return str(member) interpolated_member = possibly_interpolate(member) if interpolate else member return f'"{interpolated_member}"' list_members = ", ".join(stringify_list_member(member) for member in raw_value) return f"{list_prefix or ''}[{list_members}]" return str(raw_value) def _stringify_val_without_interpolation(self, raw_value: _TomlValue) -> str: return self._stringify_val( raw_value, option="", section="", section_values={}, interpolate=False, ) @property def sections(self) -> list[str]: return [scope for scope in self.values if scope != "DEFAULT"] def has_section(self, section: str) -> bool: return section in self.values def has_option(self, section: str, option: str) -> bool: if not self.has_section(section): return False return option in self.values[section] or option in self.defaults def get_value(self, section: str, option: str) -> str | None: section_values = self.values.get(section) if section_values is None: raise configparser.NoSectionError(section) stringify = partial( self._stringify_val, option=option, section=section, section_values=section_values, ) if option not in section_values: if option in self.defaults: return stringify(raw_value=self.defaults[option]) raise configparser.NoOptionError(option, section) option_value = section_values[option] if not isinstance(option_value, dict): return stringify(option_value) # Handle dict options, along with the special `my_list_option.add` and # `my_list_option.remove` syntax. We only treat `add` and `remove` as the special list # syntax if the values are lists to reduce the risk of incorrectly special casing. 
has_add = isinstance(option_value.get("add"), list) has_remove = isinstance(option_value.get("remove"), list) if not has_add and not has_remove: return stringify(option_value) add_val = stringify(option_value["add"], list_prefix="+") if has_add else None remove_val = stringify(option_value["remove"], list_prefix="-") if has_remove else None if has_add and has_remove: return f"{add_val},{remove_val}" if has_add: return add_val return remove_val def options(self, section: str) -> list[str]: section_values = self.values.get(section) if section_values is None: raise configparser.NoSectionError(section) return [ *section_values.keys(), *( default_option for default_option in self.defaults if default_option not in section_values ), ] @property def defaults(self) -> dict[str, str]: return { option: self._stringify_val_without_interpolation(option_val) for option, option_val in self.values["DEFAULT"].items() } @dataclass(frozen=True, eq=False) class _SingleFileConfig(Config): """Config read from a single file.""" config_path: str content_digest: str values: _ConfigValues def configs(self) -> list[_SingleFileConfig]: return [self] def sources(self) -> list[str]: return [self.config_path] def sections(self) -> list[str]: return self.values.sections def has_section(self, section: str) -> bool: return self.values.has_section(section) def has_option(self, section: str, option: str) -> bool: return self.values.has_option(section, option) def get_value(self, section: str, option: str) -> str | None: return self.values.get_value(section, option) def get_source_for_option(self, section: str, option: str) -> str | None: if self.has_option(section, option): return self.sources()[0] return None def __repr__(self) -> str: return f"SingleFileConfig({self.config_path})" def __eq__(self, other: Any) -> bool: if not isinstance(other, _SingleFileConfig): return NotImplemented return self.config_path == other.config_path and self.content_digest == other.content_digest def __hash__(self) -> int: return hash(self.content_digest) @dataclass(frozen=True) class _ChainedConfig(Config): """Config read from multiple sources.""" # Config instances to chain. Later instances take precedence over earlier ones. chained_configs: tuple[_SingleFileConfig, ...] @property def _configs(self) -> tuple[_SingleFileConfig, ...]: return self.chained_configs def configs(self) -> tuple[_SingleFileConfig, ...]: return self.chained_configs def sources(self) -> list[str]: # NB: Present the sources in the order we were given them. 
return list(itertools.chain.from_iterable(cfg.sources() for cfg in reversed(self._configs))) def sections(self) -> list[str]: ret: OrderedSet[str] = OrderedSet() for cfg in self._configs: ret.update(cfg.sections()) return list(ret) def has_section(self, section: str) -> bool: for cfg in self._configs: if cfg.has_section(section): return True return False def has_option(self, section: str, option: str) -> bool: for cfg in self._configs: if cfg.has_option(section, option): return True return False def get_value(self, section: str, option: str) -> str | None: for cfg in self._configs: try: return cfg.get_value(section, option) except (configparser.NoSectionError, configparser.NoOptionError): pass if not self.has_section(section): raise configparser.NoSectionError(section) raise configparser.NoOptionError(option, section) def get_source_for_option(self, section: str, option: str) -> str | None: for cfg in self._configs: if cfg.has_option(section, option): return cfg.get_source_for_option(section, option) return None def __repr__(self) -> str: return f"ChainedConfig({self.sources()})" @dataclass(frozen=True) class TomlSerializer: """Convert a dictionary of option scopes -> Python values into TOML understood by Pants. The constructor expects a dictionary of option scopes to their corresponding values as represented in Python. For example: { "GLOBAL": { "o1": True, "o2": "hello", "o3": [0, 1, 2], }, "some-subsystem": { "dict_option": { "a": 0, "b": 0, }, }, } """ parsed: Mapping[str, dict[str, int | float | str | bool | list | dict]] def normalize(self) -> dict: def normalize_section_value(option, option_value) -> tuple[str, Any]: # With TOML, we store dict values as strings (for now). if isinstance(option_value, dict): option_value = str(option_value) if option.endswith(".add"): option = option.rsplit(".", 1)[0] option_value = f"+{option_value!r}" elif option.endswith(".remove"): option = option.rsplit(".", 1)[0] option_value = f"-{option_value!r}" return option, option_value return { section: dict( normalize_section_value(option, option_value) for option, option_value in section_values.items() ) for section, section_values in self.parsed.items() } def serialize(self) -> str: toml_values = self.normalize() return toml.dumps(toml_values)
    def configs(self) -> Sequence[_SingleFileConfig]:
        """Returns the underlying single-file configs represented by this object."""
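# A hedged usage sketch for the Config class above, not taken from the Pants
# codebase itself: the import path (pants.option.config) and the _FakeSource
# helper are assumptions for illustration. Config.load() only requires objects
# exposing .path and .content, per the ConfigSource protocol defined earlier.
from dataclasses import dataclass

from pants.option.config import Config  # assumed import path


@dataclass(frozen=True)
class _FakeSource:
    path: str
    content: bytes


TOML = b"""
[GLOBAL]
level = "%(user)s-info"

[source]
root_patterns.add = ["extra/"]
"""

config = Config.load([_FakeSource("pants.toml", TOML)])
# %(user)s is interpolated from the seed values built by _determine_seed_values().
print(config.get("GLOBAL", "level"))
# The `option.add` syntax is stringified back into a "+[...]"-prefixed list.
print(config.get_value("source", "root_patterns"))  # '+["extra/"]'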
ddl_test.go
// Copyright 2015 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package ddl import ( "context" "os" "testing" "time" . "github.com/pingcap/check" "github.com/pingcap/parser/ast" "github.com/pingcap/parser/model" "github.com/pingcap/parser/terror" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta" "github.com/pingcap/tidb/meta/autoid" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/store/mockstore" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/mock" "github.com/pingcap/tidb/util/testleak" ) type DDLForTest interface { // SetHook sets the hook. SetHook(h Callback) // SetInterceptoror sets the interceptor. SetInterceptoror(h Interceptor) } // SetHook implements DDL.SetHook interface. func (d *ddl) SetHook(h Callback) { d.mu.Lock() defer d.mu.Unlock() d.mu.hook = h } // SetInterceptoror implements DDL.SetInterceptoror interface. func (d *ddl) SetInterceptoror(i Interceptor) { d.mu.Lock() defer d.mu.Unlock() d.mu.interceptor = i } // generalWorker returns the general worker. func (d *ddl) generalWorker() *worker { return d.workers[generalWorker] } // restartWorkers is like the function of d.start. But it won't initialize the "workers" and create a new worker. // It only starts the original workers. func (d *ddl) restartWorkers(ctx context.Context) { d.ctx, d.cancel = context.WithCancel(ctx) d.wg.Add(1) go d.limitDDLJobs() if !RunWorker { return } err := d.ownerManager.CampaignOwner() terror.Log(err) for _, worker := range d.workers { worker.wg.Add(1) worker.ctx = d.ctx w := worker go w.start(d.ddlCtx) asyncNotify(worker.ddlJobCh) } } func
(t *testing.T) { CustomVerboseFlag = true *CustomParallelSuiteFlag = true logLevel := os.Getenv("log_level") logutil.InitLogger(logutil.NewLogConfig(logLevel, "", "", logutil.EmptyFileLogConfig, false)) autoid.SetStep(5000) ReorgWaitTimeout = 30 * time.Millisecond batchInsertDeleteRangeSize = 2 config.UpdateGlobal(func(conf *config.Config) { // Test for table lock. conf.EnableTableLock = true conf.Log.SlowThreshold = 10000 // Test for add/drop primary key. conf.AlterPrimaryKey = true }) testleak.BeforeTest() TestingT(t) testleak.AfterTestT(t)() } func testNewDDLAndStart(ctx context.Context, c *C, options ...Option) *ddl { d := newDDL(ctx, options...) err := d.Start(nil) c.Assert(err, IsNil) return d } func testCreateStore(c *C, name string) kv.Storage { store, err := mockstore.NewMockStore() c.Assert(err, IsNil) return store } func testNewContext(d *ddl) sessionctx.Context { ctx := mock.NewContext() ctx.Store = d.store return ctx } func getSchemaVer(c *C, ctx sessionctx.Context) int64 { err := ctx.NewTxn(context.Background()) c.Assert(err, IsNil) txn, err := ctx.Txn(true) c.Assert(err, IsNil) m := meta.NewMeta(txn) ver, err := m.GetSchemaVersion() c.Assert(err, IsNil) return ver } type historyJobArgs struct { ver int64 db *model.DBInfo tbl *model.TableInfo tblIDs map[int64]struct{} } func checkEqualTable(c *C, t1, t2 *model.TableInfo) { c.Assert(t1.ID, Equals, t2.ID) c.Assert(t1.Name, Equals, t2.Name) c.Assert(t1.Charset, Equals, t2.Charset) c.Assert(t1.Collate, Equals, t2.Collate) c.Assert(t1.PKIsHandle, DeepEquals, t2.PKIsHandle) c.Assert(t1.Comment, DeepEquals, t2.Comment) c.Assert(t1.AutoIncID, DeepEquals, t2.AutoIncID) } func checkHistoryJob(c *C, job *model.Job) { c.Assert(job.State, Equals, model.JobStateSynced) } func checkHistoryJobArgs(c *C, ctx sessionctx.Context, id int64, args *historyJobArgs) { txn, err := ctx.Txn(true) c.Assert(err, IsNil) t := meta.NewMeta(txn) historyJob, err := t.GetHistoryDDLJob(id) c.Assert(err, IsNil) c.Assert(historyJob.BinlogInfo.FinishedTS, Greater, uint64(0)) if args.tbl != nil { c.Assert(historyJob.BinlogInfo.SchemaVersion, Equals, args.ver) checkEqualTable(c, historyJob.BinlogInfo.TableInfo, args.tbl) return } // for handling schema job c.Assert(historyJob.BinlogInfo.SchemaVersion, Equals, args.ver) c.Assert(historyJob.BinlogInfo.DBInfo, DeepEquals, args.db) // only for creating schema job if args.db != nil && len(args.tblIDs) == 0 { return } } func buildCreateIdxJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, unique bool, indexName string, colName string) *model.Job { return &model.Job{ SchemaID: dbInfo.ID, TableID: tblInfo.ID, Type: model.ActionAddIndex, BinlogInfo: &model.HistoryInfo{}, Args: []interface{}{unique, model.NewCIStr(indexName), []*ast.IndexPartSpecification{{ Column: &ast.ColumnName{Name: model.NewCIStr(colName)}, Length: types.UnspecifiedLength}}}, } } func testCreatePrimaryKey(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, colName string) *model.Job { job := buildCreateIdxJob(dbInfo, tblInfo, true, "primary", colName) job.Type = model.ActionAddPrimaryKey err := d.doDDLJob(ctx, job) c.Assert(err, IsNil) v := getSchemaVer(c, ctx) checkHistoryJobArgs(c, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo}) return job } func testCreateIndex(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, unique bool, indexName string, colName string) *model.Job { job := buildCreateIdxJob(dbInfo, tblInfo, unique, indexName, colName) err := d.doDDLJob(ctx, job) 
c.Assert(err, IsNil) v := getSchemaVer(c, ctx) checkHistoryJobArgs(c, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo}) return job } func testAddColumn(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, args []interface{}) *model.Job { job := &model.Job{ SchemaID: dbInfo.ID, TableID: tblInfo.ID, Type: model.ActionAddColumn, Args: args, BinlogInfo: &model.HistoryInfo{}, } err := d.doDDLJob(ctx, job) c.Assert(err, IsNil) v := getSchemaVer(c, ctx) checkHistoryJobArgs(c, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo}) return job } func testAddColumns(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, args []interface{}) *model.Job { job := &model.Job{ SchemaID: dbInfo.ID, TableID: tblInfo.ID, Type: model.ActionAddColumns, Args: args, BinlogInfo: &model.HistoryInfo{}, } err := d.doDDLJob(ctx, job) c.Assert(err, IsNil) v := getSchemaVer(c, ctx) checkHistoryJobArgs(c, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo}) return job } func buildDropIdxJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, indexName string) *model.Job { tp := model.ActionDropIndex if indexName == "primary" { tp = model.ActionDropPrimaryKey } return &model.Job{ SchemaID: dbInfo.ID, TableID: tblInfo.ID, Type: tp, BinlogInfo: &model.HistoryInfo{}, Args: []interface{}{model.NewCIStr(indexName)}, } } func testDropIndex(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, indexName string) *model.Job { job := buildDropIdxJob(dbInfo, tblInfo, indexName) err := d.doDDLJob(ctx, job) c.Assert(err, IsNil) v := getSchemaVer(c, ctx) checkHistoryJobArgs(c, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo}) return job } func buildRebaseAutoIDJobJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, newBaseID int64) *model.Job { return &model.Job{ SchemaID: dbInfo.ID, TableID: tblInfo.ID, Type: model.ActionRebaseAutoID, BinlogInfo: &model.HistoryInfo{}, Args: []interface{}{newBaseID}, } }
TestT
files_6.js
var searchData=
  ['mouse_2eh',['mouse.h',['../mouse_8h.html',1,'']]]
];
[
  ['mcp23s17_2eh',['MCP23S17.h',['../_m_c_p23_s17_8h.html',1,'']]],
  ['mos6502_2eh',['MOS6502.h',['../_m_o_s6502_8h.html',1,'']]],
cli.py
""" Usage: desired-state [options] control [<control-id>] desired-state [options] monitor <current-state.yml> [<rules.yml>] desired-state [options] from <initial-state.yml> to <new-state.yml> [<rules.yml>] desired-state [options] update-desired-state <new-state.yml> desired-state [options] update-actual-state <new-state.yml> desired-state [options] validate <state.yml> <schema.yml> Options: -h, --help Show this page --debug Show debug logging --verbose Show verbose logging --explain Do not run the rules, only print the ones that would run. --ask-become-pass Ask for the become password --project-src=<d> Copy project files this directory [default: .] --inventory=<i> Inventory to use --cwd=<c> Change working directory on start --stream=<s> Websocket channel to stream telemetry to --control-plane=<s> Websocket channel to connect to the control plane """ from .stream import WebsocketChannel, NullChannel from .messages import DesiredState, ActualState, Shutdown, now from .util import ConsoleTraceLog, check_state from .server import ZMQServerChannel from .client import ZMQClientChannel from .monitor import DesiredStateMonitor from .control import DesiredStateControl from .validate import get_errors, validate from .collection import split_collection_name, has_rules, has_schema, load_rules, load_schema from .types import get_meta import gevent_fsm.conf import gevent.exceptions from getpass import getpass from collections import defaultdict from docopt import docopt import yaml import os import sys import logging from uuid import uuid4 import gevent from gevent import monkey monkey.patch_all() FORMAT = "[%(asctime)s] %(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s" logging.basicConfig(filename='/tmp/desired_state.log', level=logging.DEBUG, format=FORMAT) # noqa logging.debug('Logging started') logging.debug('Loading runner') logging.debug('Loaded runner') logger = logging.getLogger('cli') def main(args=None): ''' Main function for the CLI. ''' if args is None: args = sys.argv[1:] parsed_args = docopt(__doc__, args) if parsed_args['--debug']: logging.basicConfig(level=logging.DEBUG) gevent_fsm.conf.settings.instrumented = True elif parsed_args['--verbose']: gevent_fsm.conf.settings.instrumented = True logging.basicConfig(level=logging.INFO) else: logging.basicConfig(level=logging.WARNING) if parsed_args['--cwd']: os.chdir(parsed_args['--cwd']) if parsed_args['control']: return desired_state_control(parsed_args) elif parsed_args['monitor']: return desired_state_monitor(parsed_args) elif parsed_args['from'] and parsed_args['to']: return desired_state_from_to(parsed_args) elif parsed_args['update-desired-state']: return desired_state_update_desired_state(parsed_args) elif parsed_args['update-actual-state']: return desired_state_update_actual_state(parsed_args) elif parsed_args['validate']: return desired_state_validate(parsed_args) else: assert False, 'Update the docopt' def inventory(parsed_args, state): ''' Loads an inventory ''' meta = get_meta(state) if meta.inventory and os.path.exists(meta.inventory): print('inventory:', meta.inventory) with open(meta.inventory) as f: return f.read() elif not parsed_args['--inventory']: print('inventory:', 'localhost only') return "all:\n hosts:\n localhost: ansible_connection=local\n" else: print('inventory:', parsed_args['--inventory']) with open(parsed_args['--inventory']) as f: return f.read() def validate_state(state): ''' Validates state using schema if it is found in the meta data of the state. 
''' meta = get_meta(state) if meta.schema: if os.path.exists(meta.schema): with open(meta.schema) as f: schema = yaml.safe_load(f.read()) elif has_schema(*split_collection_name(meta.schema)): schema = load_schema(*split_collection_name(meta.schema)) else: schema = {} validate(state, schema) def parse_options(parsed_args): secrets = defaultdict(str) if parsed_args['--ask-become-pass'] and not secrets['become']: secrets['become'] = getpass() if parsed_args['--stream']: stream = WebsocketChannel(parsed_args['--stream']) else: stream = NullChannel() project_src = os.path.abspath( os.path.expanduser(parsed_args['--project-src'])) return secrets, project_src, stream def load_rules_from_args_or_meta(parsed_args, state): meta = get_meta(state) if parsed_args['<rules.yml>']: if os.path.exists(parsed_args['<rules.yml>']): with open(parsed_args['<rules.yml>']) as f: rules = yaml.safe_load(f.read()) elif has_rules(*split_collection_name(parsed_args['<rules.yml>'])): rules = load_rules( *split_collection_name(parsed_args['<rules.yml>'])) else: raise Exception('No rules file found') elif meta.rules: if os.path.exists(meta.rules): with open(meta.rules) as f: rules = yaml.safe_load(f.read()) elif has_rules(*split_collection_name(meta.rules)): rules = load_rules(*split_collection_name(meta.rules)) else:
else: raise Exception('No rules file found') return rules def desired_state_control(parsed_args): secrets, _, stream = parse_options(parsed_args) control_id = parsed_args['<control-id>'] or str(uuid4()) if parsed_args['--control-plane']: control_plane = WebsocketChannel(parsed_args['--control-plane']) else: control_plane = NullChannel() threads = [] if stream.thread: threads.append(stream.thread) tracer = ConsoleTraceLog() control = DesiredStateControl( tracer, 0, control_id, secrets, stream, control_plane) control_plane.outbox = control.queue threads.append(control.thread) server = ZMQServerChannel(control.queue, tracer) threads.append(server.zmq_thread) threads.append(server.controller_thread) control.controller.outboxes['output'] = server.queue gevent.joinall(threads) def desired_state_monitor(parsed_args): ''' Starts the state monitoring green thread. ''' secrets, project_src, stream = parse_options(parsed_args) threads = [] if stream.thread: threads.append(stream.thread) with open(parsed_args['<current-state.yml>']) as f: current_desired_state = yaml.safe_load(f.read()) validate_state(current_desired_state) rules = load_rules_from_args_or_meta(parsed_args, current_desired_state) tracer = ConsoleTraceLog() worker = DesiredStateMonitor(tracer, 0, secrets, project_src, rules, current_desired_state, inventory( parsed_args, current_desired_state), stream) threads.append(worker.thread) server = ZMQServerChannel(worker.queue, tracer) threads.append(server.zmq_thread) threads.append(server.controller_thread) worker.controller.outboxes['output'] = server.queue gevent.joinall(threads) return 0 def desired_state_from_to(parsed_args): ''' Calculates the differene in state from initial-state to new-state executes those changes and exits. ''' secrets, project_src, stream = parse_options(parsed_args) threads = [] if stream.thread: threads.append(stream.thread) with open(parsed_args['<initial-state.yml>']) as f: initial_desired_state = yaml.safe_load(f.read()) validate_state(initial_desired_state) with open(parsed_args['<new-state.yml>']) as f: new_desired_state = f.read() validate_state(yaml.safe_load(new_desired_state)) rules = load_rules_from_args_or_meta(parsed_args, initial_desired_state) tracer = ConsoleTraceLog() worker = DesiredStateMonitor(tracer, 0, secrets, project_src, rules, initial_desired_state, inventory( parsed_args, initial_desired_state), stream) threads.append(worker.thread) worker.queue.put(DesiredState(0, now(), 0, 0, new_desired_state)) worker.queue.put(Shutdown()) gevent.joinall([worker.thread]) return 0 def desired_state_update_desired_state(parsed_args): ''' Sends a new desired state to the monitor green thread. ''' with open(parsed_args['<new-state.yml>']) as f: new_state = f.read() check_state(new_state) validate_state(yaml.safe_load(new_state)) client = ZMQClientChannel() client.send(DesiredState(0, now(), 0, 0, new_state)) return 0 def desired_state_update_actual_state(parsed_args): ''' Sends a new actual state to the monitor green thread. ''' with open(parsed_args['<new-state.yml>']) as f: new_state = f.read() check_state(new_state) validate_state(yaml.safe_load(new_state)) client = ZMQClientChannel() client.send(ActualState(0, 0, new_state)) return 0 def desired_state_validate(parsed_args): ''' Validates a state using the schema and prints a list of errors in the state. 
''' with open(parsed_args['<state.yml>']) as f: state = yaml.safe_load(f.read()) with open(parsed_args['<schema.yml>']) as f: schema = yaml.safe_load(f.read()) for error in get_errors(state, schema): print(error) else: return 0 return 1 if __name__ == "__main__": main()
raise Exception('No rules file found')
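# A hedged usage sketch for the CLI entry point above. The package/module path
# (desired_state.cli) and the state.yml / schema.yml file names are assumptions
# for illustration; main() feeds the argument vector to docopt exactly as the
# module docstring describes, so this mirrors running
# `desired-state validate state.yml schema.yml` from a shell.
from desired_state.cli import main  # assumed import path

exit_code = main(["validate", "state.yml", "schema.yml"])
print("validate returned:", exit_code)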
lib.rs
#[cfg(feature = "rayon")] extern crate intervals; extern crate ndarray; extern crate healpix; extern crate num; extern crate numpy; extern crate rayon; extern crate time; extern crate pyo3; #[macro_use] extern crate lazy_static; use std::ops::Range; use std::sync::Mutex; use std::path::Path; use std::collections::HashMap; use std::sync::atomic::{AtomicUsize, Ordering}; use ndarray::{Array, Array1, Array2, Ix2}; use numpy::{IntoPyArray, PyArray1, PyArray2, PyReadonlyArray1, PyReadonlyArray2}; use pyo3::prelude::{pymodule, Py, PyModule, PyResult, Python}; use pyo3::types::{PyDict, PyList, PyString}; use pyo3::{PyObject, ToPyObject, exceptions}; use moc::qty::{MocQty, Hpx}; use moc::elemset::range::{ MocRanges, uniq::HpxUniqRanges }; use moc::moc::{CellMOCIterator, CellMOCIntoIterator}; use moc::moc::range::RangeMOC; use moc::ranges::{SNORanges, Ranges}; use moc::hpxranges2d::TimeSpaceMoc; pub mod ndarray_fromto; pub mod coverage; pub mod spatial_coverage; pub mod temporal_coverage; pub mod time_space_coverage; use crate::ndarray_fromto::{ranges_to_array2, mocranges_to_array2, vec_range_to_array2}; // uniq_ranges_to_array1 type Coverage2DHashMap = HashMap<usize, TimeSpaceMoc<u64, u64>>; lazy_static! { static ref COVERAGES_2D: Mutex<Coverage2DHashMap> = Mutex::new(HashMap::new()); static ref NUM_COVERAGES_2D: AtomicUsize = AtomicUsize::new(0); } /// Insert a Time-Space coverage in the Hash Map /// storing all the current 2D coverages /// /// # Arguments /// /// * `coverage` - The new Time-Space coverage to insert /// /// # Panics /// /// * This will panic if the `COVERAGES_2D` or `NUM_COVERAGES_2D` /// are already held by the current thread fn insert_new_coverage(coverage: TimeSpaceMoc<u64, u64>) -> usize { let mut coverages = COVERAGES_2D.lock().unwrap(); let index = NUM_COVERAGES_2D.fetch_add(1, Ordering::SeqCst ); if let Some(_v) = coverages.insert(index, coverage) { panic!("There is already a coverage at this index."); } index } /// Remove a Time-Space coverage from the Hash Map /// storing all the current 2D coverages /// /// # Arguments /// /// * `index` - The coverage to remove /// /// # Panics /// /// * If `COVERAGES_2D` is already held by the current thread. fn remove_coverage(index: usize) { let mut coverages = COVERAGES_2D.lock().unwrap(); let _coverage = coverages .remove(&index) // `None` is mapped to `Err(&'static str)` // because we suppose there should be a coverage // stored in the hash map at the `index` key. .expect("There is no coverage to remove"); } /// Replace a Time-Space coverage at a specific index. /// /// # Arguments /// /// * `index` - The index of the Time-Space coverage to replace /// * `coverage` - The new coverage /// /// # Panics /// /// * If no Time-Space coverage has been found in the hash map /// for this specific `index`. /// * If `COVERAGES_2D` is already held by the current thread. fn update_coverage(index: usize, coverage: TimeSpaceMoc<u64, u64>) { let mut coverages = COVERAGES_2D.lock().unwrap(); coverages .insert(index, coverage) // `None` is mapped to `Err(&'static str)` // because we suppose there should be a coverage // stored in the hash map at the `index` key. 
.expect("There is no coverage present"); } fn coverage_op<O>(py: Python, a: PyReadonlyArray2<u64>, b: PyReadonlyArray2<u64>, op: O) -> Py<PyArray2<u64>> where O: Fn(Ranges<u64>, Ranges<u64>) -> Ranges<u64> { let ranges_a = a.as_array().to_owned(); let ranges_b = b.as_array().to_owned(); let cov_a = coverage::create_ranges_from_py_unchecked(ranges_a); let cov_b = coverage::create_ranges_from_py_unchecked(ranges_b); let result = op(cov_a, cov_b); let result: Array2<u64> = ranges_to_array2(result); result.to_owned().into_pyarray(py).to_owned() } fn coverage_complement<Q, F>(py: Python, ranges: PyReadonlyArray2<u64>, to_moc_ranges: F) -> Py<PyArray2<u64>> where Q: MocQty<u64>, F: Fn(Array2<u64>) -> MocRanges<u64, Q> { let ranges = ranges.as_array().to_owned(); let coverage = to_moc_ranges(ranges); let result = coverage.complement(); let result = mocranges_to_array2(result); result.into_pyarray(py).to_owned() } fn coverage_degrade<Q, F>( py: Python, ranges: PyReadonlyArray2<u64>, depth: u8, to_moc_ranges: F, ) -> PyResult<Py<PyArray2<u64>>> where Q: MocQty<u64>, F: Fn(Array2<u64>) -> MocRanges<u64, Q> { let ranges = ranges.as_array().to_owned(); let mut ranges = to_moc_ranges(ranges); coverage::degrade_ranges(&mut ranges, depth)?; // The result is already consistent let result: Array2<u64> = mocranges_to_array2(ranges); Ok(result.into_pyarray(py).to_owned()) } fn coverage_merge_intervals<Q, F>( py: Python, ranges: PyReadonlyArray2<u64>, min_depth: i8, to_moc_ranges: F, ) -> PyResult<Py<PyArray2<u64>>> where Q: MocQty<u64>, F: Fn(Array2<u64>) -> MocRanges<u64, Q> { let ranges = ranges.as_array().to_owned(); let mut coverage = to_moc_ranges(ranges); coverage = coverage::merge(coverage, min_depth)?; let result: Array2<u64> = mocranges_to_array2(coverage); Ok(result.into_pyarray(py).to_owned()) } #[pymodule] fn mocpy(_py: Python, m: &PyModule) -> PyResult<()> { /// Create a 1D spatial coverage from a list of /// longitudes and latitudes /// /// # Arguments /// /// * ``depth`` - The depth of the coverage between `[0, <u64>::MAXDEPTH] = [0, 29]` /// * ``lon`` - The longitudes in radians /// * ``lat`` - The latitudes in radians /// /// # Precondition /// /// ``lon`` and ``lat`` must be expressed in radians and be valid. /// /// # Errors /// /// * ``lon`` and ``lat`` do not have the same length /// * ``depth`` is not comprised in `[0, <T>::MAXDEPTH] = [0, 29]` #[pyfn(m, "from_lonlat")] fn from_lonlat( py: Python, depth: u8, lon: PyReadonlyArray1<f64>, lat: PyReadonlyArray1<f64>, ) -> PyResult<Py<PyArray2<u64>>> { let lon = lon.as_array().to_owned().into_raw_vec(); let lat = lat.as_array().to_owned().into_raw_vec(); let ranges = spatial_coverage::create_from_position(lon, lat, depth)?; let result: Array2<u64> = mocranges_to_array2(ranges); Ok(result.into_pyarray(py).to_owned()) } /// Create a 1D spatial coverage from a list of uniq cells each associated with a value. /// /// The coverage computed contains the cells summing from ``cumul_from`` to ``cumul_to``. /// /// # Arguments /// /// * ``uniq`` - Uniq HEALPix indices /// * ``values`` - Array containing the values associated for each cells. /// Must be of the same size of ``uniq`` and must sum to one. 
/// * ``cumul_from`` - The cumulative value from which cells are put in the coverage /// * ``cumul_to`` - The cumulative value to which cells are put in the coverage /// * ``max_depth`` - the largest depth of the output MOC, which must be larger or equals to the largest /// depth in the `uniq` values /// * `asc`: cumulative value computed from lower to highest densities instead of from highest to lowest /// * `strict`: (sub-)cells overlapping the `cumul_from` or `cumul_to` values are not added /// * `no_split`: cells overlapping the `cumul_from` or `cumul_to` values are not recursively split /// * `reverse_decent`: perform the recursive decent from the highest cell number to the lowest (to be compatible with Aladin) /// /// # Precondition /// /// * ``uniq`` and ``values`` must be of the same size /// * ``values`` must sum to one #[pyfn(m, "from_valued_hpx_cells")] fn from_valued_hpx_cells( py: Python, max_depth: u8, uniq: PyReadonlyArray1<u64>, values: PyReadonlyArray1<f64>, cumul_from: f64, cumul_to: f64, asc: bool, strict: bool, no_split: bool, reverse_decent: bool, ) -> PyResult<Py<PyArray2<u64>>> { let uniq = uniq.as_array().to_owned(); let values = values.as_array().to_owned(); let ranges = spatial_coverage::from_valued_healpix_cells_with_opt( max_depth, uniq, values, cumul_from, cumul_to, asc, strict, no_split, reverse_decent, )?; //from_valued_healpix_cells(max_depth, uniq, values, cumul_from, cumul_to)?; let result: Array2<u64> = mocranges_to_array2(ranges); Ok(result.into_pyarray(py).to_owned()) } /// Create a 2D Time-Space coverage from a list of /// (time, longitude, latitude) tuples. /// /// # Arguments /// /// * ``times`` - The times at which the sky coordinates have be given, in jd coded /// on doubles (=> not precise to the microsecond). /// * ``d1`` - The depth along the Time axis. /// * ``lon`` - The longitudes in radians /// * ``lat`` - The latitudes in radians /// * ``d2`` - The depth along the Space axis. /// /// # Precondition /// /// * ``lon`` and ``lat`` must be expressed in radians. /// * ``times`` must be expressed in jd, on doubles (=> not precise to the microsecond). /// /// # Errors /// /// * ``lon``, ``lat`` and ``times`` do not have the same length. /// * ``d1`` is not comprised in `[0, <T>::MAXDEPTH] = [0, 61]` /// * ``d2`` is not comprised in `[0, <S>::MAXDEPTH] = [0, 29]` /// /// # Remark /// /// Method kept temporarily to ensure backward compatibility. /// #[pyfn(m, "from_time_lonlat_approx")] fn from_time_lonlat_approx( index: usize, times: PyReadonlyArray1<f64>, d1: u8, lon: PyReadonlyArray1<f64>, lat: PyReadonlyArray1<f64>, d2: u8, ) -> PyResult<()> { let times = times.as_array() .to_owned() .into_raw_vec(); let lon = lon.as_array() .to_owned() .into_raw_vec(); let lat = lat.as_array() .to_owned() .into_raw_vec(); let coverage = time_space_coverage::create_from_times_positions_approx(times, lon, lat, d1, d2)?; // Update a coverage in the COVERAGES_2D // hash map and return its index key to python update_coverage(index, coverage); Ok(()) } /// Create a 2D Time-Space coverage from a list of /// (time, longitude, latitude) tuples. /// /// # Arguments /// /// * ``times`` - The times at which the sky coordinates have be given, in microsecond since JD=0. /// * ``d1`` - The depth along the Time axis. /// * ``lon`` - The longitudes in radians /// * ``lat`` - The latitudes in radians /// * ``d2`` - The depth along the Space axis. /// /// # Precondition /// /// * ``lon`` and ``lat`` must be expressed in radians. 
/// * ``times`` must be expressed in jd, in microsecond since JD=0. /// /// # Errors /// /// * ``lon``, ``lat`` and ``times`` do not have the same length. /// * ``d1`` is not comprised in `[0, <T>::MAXDEPTH] = [0, 61]` /// * ``d2`` is not comprised in `[0, <S>::MAXDEPTH] = [0, 29]` #[pyfn(m, "from_time_lonlat")] fn from_time_lonlat( index: usize, times: PyReadonlyArray1<u64>, d1: u8, lon: PyReadonlyArray1<f64>, lat: PyReadonlyArray1<f64>, d2: u8, ) -> PyResult<()> { let times = times.as_array() .to_owned() .into_raw_vec(); let lon = lon.as_array() .to_owned() .into_raw_vec(); let lat = lat.as_array() .to_owned() .into_raw_vec(); let coverage = time_space_coverage::create_from_times_positions(times, lon, lat, d1, d2)?; // Update a coverage in the COVERAGES_2D // hash map and return its index key to python update_coverage(index, coverage); Ok(()) } /// Create a 2D Time-Space coverage from a list of /// (time_range, longitude, latitude) tuples. /// /// # Arguments /// /// * ``times_min`` - The begining time of observation, in jd coded /// on doubles (=> not precise to the microsecond). /// * ``times_max`` - The ending time of observation, in jd coded /// on doubles (=> not precise to the microsecond). /// * ``d1`` - The depth along the Time axis. /// * ``lon`` - The longitudes in radians /// * ``lat`` - The latitudes in radians /// * ``d2`` - The depth along the Space axis. /// /// # Precondition /// /// * ``lon`` and ``lat`` must be expressed in radians. /// * ``times`` must be expressed in jd. /// /// # Errors /// /// * ``lon``, ``lat`` and ``times`` do not have the same length. /// * ``d1`` is not comprised in `[0, <T>::MAXDEPTH] = [0, 61]` /// * ``d2`` is not comprised in `[0, <S>::MAXDEPTH] = [0, 29]` /// /// # Remark /// /// Method kept temporarily to ensure backward compatibility. /// #[pyfn(m, "from_time_ranges_lonlat_approx")] fn from_time_ranges_lonlat_approx( index: usize, times_min: PyReadonlyArray1<f64>, times_max: PyReadonlyArray1<f64>, d1: u8, lon: PyReadonlyArray1<f64>, lat: PyReadonlyArray1<f64>, d2: u8, ) -> PyResult<()> { let times_min = times_min.as_array() .to_owned() .into_raw_vec(); let times_max = times_max.as_array() .to_owned() .into_raw_vec(); let lon = lon.as_array() .to_owned() .into_raw_vec(); let lat = lat.as_array() .to_owned() .into_raw_vec(); let coverage = time_space_coverage::create_from_time_ranges_positions_approx(times_min, times_max, d1, lon, lat, d2)?; // Update a coverage in the COVERAGES_2D // hash map and return its index key to python update_coverage(index, coverage); Ok(()) } /// Create a 2D Time-Space coverage from a list of /// (time_range, longitude, latitude) tuples. /// /// # Arguments /// /// * ``times_min`` - The begining time of observation, in microsecond since JD=0. /// * ``times_max`` - The ending time of observation, in microsecond since JD=0. /// * ``d1`` - The depth along the Time axis. /// * ``lon`` - The longitudes in radians /// * ``lat`` - The latitudes in radians /// * ``d2`` - The depth along the Space axis. /// /// # Precondition /// /// * ``lon`` and ``lat`` must be expressed in radians. /// * ``times`` must be expressed in jd. /// /// # Errors /// /// * ``lon``, ``lat`` and ``times`` do not have the same length. 
/// * ``d1`` is not comprised in `[0, <T>::MAXDEPTH] = [0, 61]` /// * ``d2`` is not comprised in `[0, <S>::MAXDEPTH] = [0, 29]` /// #[pyfn(m, "from_time_ranges_lonlat")] fn from_time_ranges_lonlat( index: usize, times_min: PyReadonlyArray1<u64>, times_max: PyReadonlyArray1<u64>, d1: u8, lon: PyReadonlyArray1<f64>, lat: PyReadonlyArray1<f64>, d2: u8, ) -> PyResult<()> { let times_min = times_min.as_array() .to_owned() .into_raw_vec(); let times_max = times_max.as_array() .to_owned() .into_raw_vec(); let lon = lon.as_array() .to_owned() .into_raw_vec(); let lat = lat.as_array() .to_owned() .into_raw_vec(); let coverage = time_space_coverage::create_from_time_ranges_positions(times_min, times_max, d1, lon, lat, d2)?; // Update a coverage in the COVERAGES_2D // hash map and return its index key to python update_coverage(index, coverage); Ok(()) } /// Create a 2D Time-Space coverage from a list of /// (time_range, longitude, latitude, radius) tuples. /// /// # Arguments /// /// * ``times_min`` - The begining time of observation in jd coded /// on doubles (=> not precise to the microsecond). /// * ``times_max`` - The ending time of observation, in jd coded /// on doubles (=> not precise to the microsecond). /// * ``d1`` - The depth along the Time axis. /// * ``lon`` - The longitudes in radians. /// * ``lat`` - The latitudes in radians. /// * ``radius`` - Radius in radians. /// * ``d2`` - The depth along the Space axis. /// /// # Precondition /// /// * ``lon``, ``lat`` and ``radius`` must be expressed in radians. /// * ``times`` must be expressed in jd. /// /// # Errors /// /// * ``lon``, ``lat``, ``times_min``, ``times_max`` and ``radius`` do not have the same length. /// * ``d1`` is not comprised in `[0, <T>::MAXDEPTH] = [0, 61]` /// * ``d2`` is not comprised in `[0, <S>::MAXDEPTH] = [0, 29]` /// /// # Remark /// /// Method kept temporarily to ensure backward compatibility. /// #[pyfn(m, "from_time_ranges_spatial_coverages_approx")] fn from_time_ranges_spatial_coverages_approx( py: Python, index: usize, times_min: PyReadonlyArray1<f64>, times_max: PyReadonlyArray1<f64>, d1: u8, spatial_coverages: &PyList, ) -> PyResult<()> { let times_min = times_min.as_array() .to_owned() .into_raw_vec(); let times_max = times_max.as_array() .to_owned() .into_raw_vec(); let coverage = time_space_coverage::from_time_ranges_spatial_coverages_approx(py, times_min, times_max, d1, spatial_coverages)?; // Update a coverage in the COVERAGES_2D // hash map and return its index key to python update_coverage(index, coverage); Ok(()) } /// Create a 2D Time-Space coverage from a list of /// (time_range, longitude, latitude, radius) tuples. /// /// # Arguments /// /// * ``times_min`` - The begining time of observation, in microsecond since JD=0. /// * ``times_max`` - The ending time of observation, in microsecond since JD=0. /// * ``d1`` - The depth along the Time axis. /// * ``lon`` - The longitudes in radians. /// * ``lat`` - The latitudes in radians. /// * ``radius`` - Radius in radians. /// * ``d2`` - The depth along the Space axis. /// /// # Precondition /// /// * ``lon``, ``lat`` and ``radius`` must be expressed in radians. /// * ``times`` must be expressed in jd. /// /// # Errors /// /// * ``lon``, ``lat``, ``times_min``, ``times_max`` and ``radius`` do not have the same length. 
/// * ``d1`` is not comprised in `[0, <T>::MAXDEPTH] = [0, 61]` /// * ``d2`` is not comprised in `[0, <S>::MAXDEPTH] = [0, 29]` /// #[pyfn(m, "from_time_ranges_spatial_coverages")] fn from_time_ranges_spatial_coverages( py: Python, index: usize, times_min: PyReadonlyArray1<u64>, times_max: PyReadonlyArray1<u64>, d1: u8, spatial_coverages: &PyList, ) -> PyResult<()> { let times_min = times_min.as_array() .to_owned() .into_raw_vec(); let times_max = times_max.as_array() .to_owned() .into_raw_vec(); let coverage = time_space_coverage::from_time_ranges_spatial_coverages(py, times_min, times_max, d1, spatial_coverages)?; // Update a coverage in the COVERAGES_2D // hash map and return its index key to python update_coverage(index, coverage); Ok(()) } #[pyfn(m, "project_on_first_dim")] fn project_on_first_dim(py: Python, ranges: PyReadonlyArray2<u64>, index: usize) -> Py<PyArray2<u64>> { // Build the input ranges from a Array2 let ranges = ranges.as_array().to_owned(); let ranges = coverage::create_hpx_ranges_from_py_unchecked(ranges); // Get the coverage and perform the projection let result = { let res = COVERAGES_2D.lock().unwrap(); let coverage = res.get(&index).unwrap(); time_space_coverage::project_on_first_dim(&ranges, coverage) }; // Convert the result back to an ndarray::Array2 let result: Array2<u64> = mocranges_to_array2(result); result.into_pyarray(py).to_owned() } /// Project the Time-Space coverage into its second dimension /// (i.e. the Space axis) /// /// # Arguments /// /// * ``ranges`` - The constrained time set of ranges. /// * ``index`` - The index of the Time-Space coverage. /// /// # Algorithm /// /// Returns the union of the spatial coverages for which /// their time ranges is contained into ``x``. /// /// # Panic /// /// If the ``ranges`` is not valid i.e.: /// /// * Contains ranges for which their inf bound is /// superior to their sup bound. /// /// This **should** not panic as this code is wrapped around MOCPy #[pyfn(m, "project_on_second_dim")] fn project_on_second_dim( py: Python, ranges: PyReadonlyArray2<u64>, index: usize, ) -> Py<PyArray2<u64>> { // Build the input ranges from a Array2 let ranges = ranges.as_array().to_owned(); let ranges = coverage::create_time_ranges_from_py_uncheked(ranges); // Get the coverage and perform the projection let result = { let res = COVERAGES_2D.lock().unwrap(); let coverage = res.get(&index).unwrap(); time_space_coverage::project_on_second_dim(&ranges, coverage) }; // Convert the result back to an ndarray::Array2 let result: Array2<u64> = mocranges_to_array2(result); result.into_pyarray(py).to_owned() } /// Serialize a Time-Space coverage into FITS /// /// # Context /// /// This is wrapped around the `serialize` method /// of MOCPy to serialize a Time-Space coverage into /// FITS. /// /// # Arguments /// /// * ``index`` - The index of the Time-Space coverage /// to serialize. #[pyfn(m, "coverage_2d_to_fits")] fn coverage_2d_to_fits(py: Python, index: usize) -> Py<PyArray1<i64>> { // Get the coverage and flatten it // to a Array1 let result: Array1<i64> = { let res = COVERAGES_2D.lock().unwrap(); let coverage = res.get(&index).unwrap(); time_space_coverage::to_fits(coverage) }; result.into_pyarray(py).to_owned() } /// Serialize a Time-Space coverage into a FITS file /// /// # Arguments /// /// * ``index`` - The index of the Time-Space coverage /// to serialize. 
/// * ``path`` - the path of the output file #[pyfn(m, "coverage_2d_to_fits_file")] fn coverage_2d_to_fits_file(path: String, index: usize) -> PyResult<()> { // Get the coverage and flatten it // to a Array1 let res = COVERAGES_2D.lock().unwrap(); let coverage = res.get(&index).unwrap(); let(depth_max_t, depth_max_s) = coverage.compute_min_depth(); time_space_coverage::to_fits_file(depth_max_t, depth_max_s, coverage, Path::new(&path)) .map_err(|e| exceptions::PyIOError::new_err(e.to_string())) } /// Serialize a Time-Space coverage into an ASCII file /// /// # Arguments /// /// * ``index`` - The index of the Time-Space coverage /// to serialize. /// * ``path`` - the path of the output file #[pyfn(m, "coverage_2d_to_ascii_file")] fn coverage_2d_to_ascii_file(path: String, index: usize) -> PyResult<()> { // Get the coverage and flatten it // to a Array1 let res = COVERAGES_2D.lock().unwrap(); let coverage = res.get(&index).unwrap(); let(depth_max_t, depth_max_s) = coverage.compute_min_depth(); time_space_coverage::to_ascii_file(depth_max_t, depth_max_s, coverage, path) .map_err(|e| exceptions::PyIOError::new_err(e.to_string())) } /// Serialize a Time-Space coverage into an ASCII string /// /// # Arguments /// /// * ``index`` - The index of the Time-Space coverage /// to serialize. /// * ``path`` - the path of the output file #[pyfn(m, "coverage_2d_to_ascii_str")] fn coverage_2d_to_ascii_str(py: Python, index: usize) -> Py<PyString> { // Get the coverage and flatten it // to a Array1 let res = COVERAGES_2D.lock().unwrap(); let coverage = res.get(&index).unwrap(); let(depth_max_t, depth_max_s) = coverage.compute_min_depth(); PyString::new(py, &time_space_coverage::to_ascii_str(depth_max_t, depth_max_s, coverage)).into() } /// Serialize a Time-Space coverage into a JSON file /// /// # Arguments /// /// * ``index`` - The index of the Time-Space coverage /// to serialize. /// * ``path`` - the path of the output file #[pyfn(m, "coverage_2d_to_json_file")] fn coverage_2d_to_json_file(path: String, index: usize) -> PyResult<()> { // Get the coverage and flatten it // to a Array1 let res = COVERAGES_2D.lock().unwrap(); let coverage = res.get(&index).unwrap(); let(depth_max_t, depth_max_s) = coverage.compute_min_depth(); time_space_coverage::to_json_file(depth_max_t, depth_max_s, coverage, path) .map_err(|e| exceptions::PyIOError::new_err(e.to_string())) } /// Serialize a Time-Space coverage into a JSON file /// /// # Arguments /// /// * ``index`` - The index of the Time-Space coverage /// to serialize. /// * ``path`` - the path of the output file #[pyfn(m, "coverage_2d_to_json_str")] fn coverage_2d_to_json_str(py: Python, index: usize) -> Py<PyString> { // Get the coverage and flatten it // to a Array1 let res = COVERAGES_2D.lock().unwrap(); let coverage = res.get(&index).unwrap(); let(depth_max_t, depth_max_s) = coverage.compute_min_depth(); PyString::new(py, &time_space_coverage::to_json_str(depth_max_t, depth_max_s, coverage)).into() } /* /// Deserialize a Time-Space coverage from a JSON file /// /// # Arguments /// /// * ``index`` - The index of the Time-Space coverage /// to serialize. 
/// * ``path`` - the path of the output file #[pyfn(m, "coverage_2d_from_json_file")] fn coverage_2d_from_json_file(path: String, index: usize) -> PyResult<()> { // Get the coverage and flatten it // to a Array1 let res = COVERAGES_2D.lock().unwrap(); let coverage = res.get(&index).unwrap(); let(depth_max_t, depth_max_s) = coverage.compute_min_depth(); time_space_coverage::to_json_file(depth_max_t, depth_max_s, coverage, path) .map_err(|e| exceptions::PyIOError::new_err(e.to_string())) } /// Deserialize a Time-Space coverage into a JSON file /// /// # Arguments /// /// * ``index`` - The index of the Time-Space coverage /// to serialize. /// * ``path`` - the path of the output file #[pyfn(m, "coverage_2d_from_json_str")] fn coverage_2d_from_json_str(py: Python, index: usize) -> Py<PyString> { // Get the coverage and flatten it // to a Array1 let res = COVERAGES_2D.lock().unwrap(); let coverage = res.get(&index).unwrap(); let(depth_max_t, depth_max_s) = coverage.compute_min_depth(); PyString::new(py, &time_space_coverage::to_json_str(depth_max_t, depth_max_s, coverage)).into() }*/ /// Deserialize a Time-Space coverage from FITS using the pre v2.0 MOC standard. /// /// # Context /// /// This is wrapped around the `from_fits` method /// of MOCPy to load a Time-Space coverage from a /// FITS file. /// /// # Arguments /// /// * ``data`` - A 1d array buffer containing the time and /// space axis ranges data. /// /// # Errors /// /// The `Array1` object stores the Time-Space coverage /// under the nested format. /// Its memory layout contains each time range followed by the /// list of space ranges referred to that time range. /// Time ranges are negatives so that one can distinguish them /// from space ranges. /// /// This method returns a `PyValueError` if the `Array1` is not /// defined as above. #[pyfn(m, "coverage_2d_from_fits_pre_v2")] fn coverage_2d_from_fits_pre_v2(index: usize, data: PyReadonlyArray1<i64>) -> PyResult<()> { let data = data.as_array().to_owned(); let coverage_from_fits = time_space_coverage::from_fits_pre_v2(data)?; // Update a coverage in the COVERAGES_2D // hash map and return its index key to python update_coverage(index, coverage_from_fits); Ok(()) } /// Deserialize a Time-Space coverage from FITS using the v2.0 MOC standard. /// /// # Context /// /// This is wrapped around the `from_fits` method /// of MOCPy to load a Time-Space coverage from a /// FITS file. /// /// # Arguments /// /// * ``data`` - A 1d array buffer containing the time and /// space axis ranges data. /// /// # Errors /// /// The `Array1` object stores the Time-Space coverage /// under the nested format. /// Its memory layout contains a list of time ranges followed by the /// list of space ranges referred to that time ranges. /// The most significant bit (MSB) of time ranges bounds is set to one so that one can /// distinguish them from space ranges. /// This is different from a negative value because we do not use the two's complement /// representation, only a flag set on the MSB. /// /// This method returns a `PyValueError` if the `Array1` is not /// defined as above. 
#[pyfn(m, "coverage_2d_from_fits")] fn coverage_2d_from_fits(index: usize, data: PyReadonlyArray1<u64>) -> PyResult<()> { let data = data.as_array().to_owned(); let coverage_from_fits = time_space_coverage::from_fits(data)?; // Update a coverage in the COVERAGES_2D // hash map and return its index key to python update_coverage(index, coverage_from_fits); Ok(()) } /// Deserialize a Time-Space coverage from a FITS file (compatible with the MOC v2.0 standard). /// /// # Arguments /// /// * ``index`` - the index used to store the Time-Space coverage /// * ``path`` - the FITS file path /// /// # Warning /// /// This function is not compatible with pre-v2.0 MOC standard. /// /// # Errors /// /// This method returns a `PyIOError` if the the function fails in writing the FITS file. #[pyfn(m, "coverage_2d_from_fits_file")] fn coverage_2d_from_fits_file(index: usize, path: String) -> PyResult<()> { let coverage_from_fits = time_space_coverage::from_fits_file(Path::new(&path))?; // Update a coverage in the COVERAGES_2D // hash map and return its index key to python update_coverage(index, coverage_from_fits); Ok(()) } /// Deserialize a Time-Space coverage from an ASCII file (compatible with the MOC v2.0 standard). /// /// # Arguments /// /// * ``index`` - the index used to store the Time-Space coverage /// * ``path`` - the ASCII file path /// /// # Errors /// /// This method returns a `PyIOError` if the the function fails in writing the FITS file. #[pyfn(m, "coverage_2d_from_ascii_file")] fn coverage_2d_from_ascii_file(index: usize, path: String) -> PyResult<()> { let coverage_from_ascii = time_space_coverage::from_ascii_file(Path::new(&path))?; // Update a coverage in the COVERAGES_2D // hash map and return its index key to python update_coverage(index, coverage_from_ascii); Ok(()) } /// Deserialize a Time-Space coverage from an JSON file. /// /// # Arguments /// /// * ``index`` - the index used to store the Time-Space coverage /// * ``path`` - the JSON file path /// /// # Errors /// /// This method returns a `PyIOError` if the the function fails in writing the FITS file. #[pyfn(m, "coverage_2d_from_json_file")] fn coverage_2d_from_json_file(index: usize, path: String) -> PyResult<()> { let coverage_from_json = time_space_coverage::from_json_file(Path::new(&path))?; // Update a coverage in the COVERAGES_2D // hash map and return its index key to python update_coverage(index, coverage_from_json); Ok(()) } /// Deserialize a Time-Space coverage from an ASCII string (compatible with the MOC v2.0 standard). /// /// # Arguments /// /// * ``index`` - the index used to store the Time-Space coverage /// * ``ascii`` - the ASCII string /// /// # Errors /// /// This method returns a `PyIOError` if the the function fails in writing the FITS file. #[pyfn(m, "coverage_2d_from_ascii_str")] fn coverage_2d_from_ascii_str(index: usize, ascii: String) -> PyResult<()> { let coverage_from_ascii = time_space_coverage::from_ascii_str(ascii)?; // Update a coverage in the COVERAGES_2D // hash map and return its index key to python update_coverage(index, coverage_from_ascii); Ok(()) } /// Deserialize a Time-Space coverage from an JSON string. /// /// # Arguments /// /// * ``index`` - the index used to store the Time-Space coverage /// * ``json`` - the JSON string /// /// # Errors /// /// This method returns a `PyIOError` if the the function fails in writing the FITS file. 
#[pyfn(m, "coverage_2d_from_json_str")] fn coverage_2d_from_json_str(index: usize, json: String) -> PyResult<()> { let coverage_from_json = time_space_coverage::from_json_str(json)?; // Update a coverage in the COVERAGES_2D // hash map and return its index key to python update_coverage(index, coverage_from_json); Ok(()) } /// Create a new empty Time-Space coverage /// /// This method is called in the constructor of the /// `mocpy.STMOC` class /// /// # Returns /// /// The index of the newly created Time-Space coverage #[pyfn(m, "create_2d_coverage")] fn create_2d_coverage(_py: Python) -> usize { // Create new empty coverage let empty_coverage = time_space_coverage::new(); // Insert a new coverage in the COVERAGES_2D // hash map and return its index key to python insert_new_coverage(empty_coverage) } /// Drop the content of a Time-Space coverage /// /// This method is automatically called by the /// Python garbage collector. #[pyfn(m, "drop_2d_coverage")] fn drop_2d_coverage(_py: Python, index: usize) { remove_coverage(index); } /// Computes the depth of a Time-Space coverage /// /// # Arguments /// /// * ``index`` - The index of the Time-Space coverage. /// /// # Infos /// /// If the Time-Space coverage is empty, the returned /// depth is `(0, 0)`. #[pyfn(m, "coverage_2d_depth")] fn coverage_2d_depth(_py: Python, index: usize) -> (u8, u8) { // Get the coverage and computes its depth // If the coverage is empty, the depth will be // (0, 0) let result = { let res = COVERAGES_2D.lock().unwrap(); let coverage = res.get(&index).unwrap(); time_space_coverage::depth(coverage) }; result } /// Returns the minimum time value of the Time-Space /// coverage /// /// # Arguments /// /// * ``index`` - The index of the Time-Space coverage. /// /// # Errors /// /// * If the coverage is empty. /// /// # Remark /// /// Method kept temporarily to ensure backward compatibility. /// #[pyfn(m, "coverage_2d_min_time_approx")] fn coverage_2d_min_time_approx(_py: Python, index: usize) -> PyResult<f64> { // Get the coverage let res = COVERAGES_2D.lock().unwrap(); let coverage = res.get(&index).unwrap(); time_space_coverage::t_min_jd(coverage) } /// Returns the minimum time value of the Time-Space /// coverage, in microarcsec since jd=0. /// /// # Arguments /// /// * ``index`` - The index of the Time-Space coverage. /// /// # Errors /// /// * If the coverage is empty. /// #[pyfn(m, "coverage_2d_min_time")] fn coverage_2d_min_time(_py: Python, index: usize) -> PyResult<u64> { // Get the coverage let res = COVERAGES_2D.lock().unwrap(); let coverage = res.get(&index).unwrap(); time_space_coverage::t_min_mircosecond_since_jd_org(coverage) } /// Returns the maximum time value of the Time-Space /// coverage /// /// # Arguments /// /// * ``index`` - The index of the Time-Space coverage. /// /// # Errors /// /// * If the coverage is empty. /// /// # Remark /// /// Method kept temporarily to ensure backward compatibility. /// #[pyfn(m, "coverage_2d_max_time_approx")] fn coverage_2d_max_time_approx(_py: Python, index: usize) -> PyResult<f64> { // Get the coverage let res = COVERAGES_2D.lock().unwrap(); let coverage = res.get(&index).unwrap(); time_space_coverage::t_max_jd(coverage) } /// Returns the maximum time value of the Time-Space /// coverage, in microseconds since jd=0. /// /// # Arguments /// /// * ``index`` - The index of the Time-Space coverage. /// /// # Errors /// /// * If the coverage is empty. 
/// #[pyfn(m, "coverage_2d_max_time")] fn coverage_2d_max_time(_py: Python, index: usize) -> PyResult<u64> { // Get the coverage let res = COVERAGES_2D.lock().unwrap(); let coverage = res.get(&index).unwrap(); time_space_coverage::t_max_mircosecond_since_jd_org(coverage) } /// Perform the union between two Time-Space coverages. /// /// # Arguments /// /// * ``id_left`` - The index of the Time-Space coverage being /// in the left of the operation. /// * ``id_right`` - The index of the Time-Space coverage being /// in the right of the operation. #[pyfn(m, "coverage_2d_union")] fn coverage_2d_union(_py: Python, index: usize, id_left: usize, id_right: usize) { let result = { let coverages = COVERAGES_2D.lock().unwrap(); // Get the left and right coverages let coverage_left = coverages.get(&id_left).unwrap(); let coverage_right = coverages.get(&id_right).unwrap(); // Perform the union time_space_coverage::union(coverage_left, coverage_right) }; // Update the coverage in the COVERAGES_2D // hash map and return its index key to python update_coverage(index, result); } /// Perform the intersection between two Time-Space coverages. /// /// # Arguments /// /// * ``id_left`` - The index of the Time-Space coverage being /// in the left of the operation. /// * ``id_right`` - The index of the Time-Space coverage being /// in the right of the operation. #[pyfn(m, "coverage_2d_intersection")] fn coverage_2d_intersection(_py: Python, index: usize, id_left: usize, id_right: usize) { let result = { let coverages = COVERAGES_2D.lock().unwrap(); // Get the left and right coverages let coverage_left = coverages.get(&id_left).unwrap(); let coverage_right = coverages.get(&id_right).unwrap(); // Perform the intersection time_space_coverage::intersection(coverage_left, coverage_right) }; // Update the coverage in the COVERAGES_2D // hash map and return its index key to python update_coverage(index, result) } /// Perform the difference between two Time-Space coverages. /// /// # Arguments /// /// * ``id_left`` - The index of the Time-Space coverage being /// in the left of the operation. /// * ``id_right`` - The index of the Time-Space coverage being /// in the right of the operation. #[pyfn(m, "coverage_2d_difference")] fn coverage_2d_difference(_py: Python, index: usize, id_left: usize, id_right: usize) { let result = { let coverages = COVERAGES_2D.lock().unwrap(); // Get the left and right coverages let coverage_left = coverages.get(&id_left).unwrap(); let coverage_right = coverages.get(&id_right).unwrap(); // Perform the difference time_space_coverage::difference(coverage_left, coverage_right) }; // Update the coverage in the COVERAGES_2D // hash map and return its index key to python update_coverage(index, result) } /// Check the equality between two Time-Space coverages /// /// # Arguments /// /// * ``id_left`` - The index of the Time-Space coverage being /// in the left of the operation. /// * ``id_right`` - The index of the Time-Space coverage being /// in the right of the operation. #[pyfn(m, "coverage_2d_equality_check")] fn coverage_2d_equality_check(_py: Python, id_left: usize, id_right: usize) -> bool { let result = { let coverages = COVERAGES_2D.lock().unwrap(); // Get the left and right coverages let cov_left = coverages.get(&id_left).unwrap(); let cov_right = coverages.get(&id_right).unwrap(); // Check the equality cov_left == cov_right }; // Return the index of the newly created // coverage result } /// Checks whether a Time-Space coverage is empty. 
/// /// # Arguments /// /// * ``index`` - The index of the Time-Space coverage to check /// the emptiness. #[pyfn(m, "coverage_2d_is_empty")] fn coverage_2d_is_empty(_py: Python, index: usize) -> bool { // Get the coverage let res = COVERAGES_2D.lock().unwrap(); let coverage = res.get(&index).unwrap(); coverage.is_empty() } /// Check if (time, position) tuples are contained into a Time-Space coverage /// /// # Arguments /// /// * ``index`` - The index of the Time-Space coverage. /// * ``times`` - Times at which the positions have been observed. /// * ``lon`` - The longitudes. /// * ``lat`` - The latitudes. /// /// # Errors /// /// * If `lon`, `lat` and `times` do not have the same length /// /// # Remark /// /// Method kept temporarily to ensure backward compatibility. /// #[pyfn(m, "coverage_2d_contains_approx")] fn coverage_2d_contains_approx( py: Python, index: usize, times: PyReadonlyArray1<f64>, lon: PyReadonlyArray1<f64>, lat: PyReadonlyArray1<f64>) -> PyResult<Py<PyArray1<bool>>> { let times = times.as_array().to_owned(); let lon = lon.as_array().to_owned(); let lat = lat.as_array().to_owned(); let res = COVERAGES_2D.lock().unwrap(); let coverage = res.get(&index).unwrap(); let mut result: Array1<bool> = Array::from_elem((lon.shape()[0],), false); time_space_coverage::contains_approx(coverage, times, lon, lat, &mut result)?; Ok(result.into_pyarray(py).to_owned()) } /// Check if (time, position) tuples are contained into a Time-Space coverage /// /// # Arguments /// /// * ``index`` - The index of the Time-Space coverage. /// * ``times`` - Times at which the positions have been observed, in microsec since jd=0 /// * ``lon`` - The longitudes. /// * ``lat`` - The latitudes. /// /// # Errors /// /// * If `lon`, `lat` and `times` do not have the same length #[pyfn(m, "coverage_2d_contains")] fn coverage_2d_contains( py: Python, index: usize, times: PyReadonlyArray1<u64>, lon: PyReadonlyArray1<f64>, lat: PyReadonlyArray1<f64>) -> PyResult<Py<PyArray1<bool>>> { let times = times.as_array().to_owned(); let lon = lon.as_array().to_owned(); let lat = lat.as_array().to_owned(); let res = COVERAGES_2D.lock().unwrap(); let coverage = res.get(&index).unwrap(); let mut result: Array1<bool> = Array::from_elem((lon.shape()[0],), false); time_space_coverage::contains(coverage, times, lon, lat, &mut result)?; Ok(result.into_pyarray(py).to_owned()) } /// Perform the union between two generic coverages /// /// # Arguments /// /// * ``a`` - The spatial coverage being the left operand /// * ``b`` - The spatial coverage being the right operand #[pyfn(m, "coverage_union")] fn coverage_union(py: Python, a: PyReadonlyArray2<u64>, b: PyReadonlyArray2<u64>) -> Py<PyArray2<u64>>
/// Perform the difference between two generic coverages /// /// # Arguments /// /// * ``a`` - The spatial coverage being the left operand /// * ``b`` - The spatial coverage being the right operand #[pyfn(m, "coverage_difference")] fn coverage_difference(py: Python, a: PyReadonlyArray2<u64>, b: PyReadonlyArray2<u64>) -> Py<PyArray2<u64>> { /*let ranges_a = a.as_array().to_owned(); let ranges_b = b.as_array().to_owned(); let cov_a = coverage::create_ranges_from_py(ranges_a); let cov_b = coverage::create_ranges_from_py(ranges_b); let result = cov_a.difference(&cov_b); let result: Array2<u64> = result.into(); result.into_pyarray(py).to_owned()*/ coverage_op(py, a, b, |cov_a, cov_b| cov_a.difference(&cov_b)) } /// Perform the intersection between two spatial coverages /// /// # Arguments /// /// * ``a`` - The spatial coverage being the left operand /// * ``b`` - The spatial coverage being the right operand #[pyfn(m, "coverage_intersection")] fn coverage_intersection( py: Python, a: PyReadonlyArray2<u64>, b: PyReadonlyArray2<u64>, ) -> Py<PyArray2<u64>> { /*let ranges_a = a.as_array().to_owned(); let ranges_b = b.as_array().to_owned(); let cov_a = coverage::create_ranges_from_py(ranges_a); let cov_b = coverage::create_ranges_from_py(ranges_b); let result = cov_a.intersection(&cov_b); let result: Array2<u64> = result.into(); result.into_pyarray(py).to_owned()*/ coverage_op(py, a, b, |cov_a, cov_b| cov_a.intersection(&cov_b)) } /// Computes the complement of the given nested/ring coverage /// /// # Arguments /// /// * ``ranges`` - The input spatial coverage #[pyfn(m, "hpx_coverage_complement")] fn hpx_coverage_complement(py: Python, ranges: PyReadonlyArray2<u64>) -> Py<PyArray2<u64>> { coverage_complement(py, ranges, coverage::create_hpx_ranges_from_py_unchecked) } /// Computes the complement of the given time coverage /// /// # Arguments /// /// * ``ranges`` - The input time coverage #[pyfn(m, "time_coverage_complement")] fn time_coverage_complement(py: Python, ranges: PyReadonlyArray2<u64>) -> Py<PyArray2<u64>> { coverage_complement(py, ranges, coverage::create_time_ranges_from_py_uncheked) } /// Deserialize a spatial coverage from a json python dictionary /// /// JSON python dictionary stores (key, value) pair where: /// /// * the ``key`` is a ``char`` being a depth /// * the ``value`` is a list of HEALPix cell indices at the depth /// indicated by the ``key`` /// /// # Arguments /// /// * ``input`` - The input python dictionary /// /// # Errors /// /// * ``input`` dict must have string typed ``key``. /// * ``input`` dict values must be a list of unsigned integer encoded /// on 64 bits (i.e. an array of `u64`). #[pyfn(m, "coverage_from_json")] fn coverage_from_json(py: Python, input: &PyDict) -> PyResult<Py<PyArray2<u64>>> { let coverage = coverage::from_json(py, input)?; let result: Array2<u64> = mocranges_to_array2(coverage); Ok(result.into_pyarray(py).to_owned()) } /// Serialize a spatial coverage to a JSON format /// /// # Arguments /// /// * ``ranges`` - The spatial coverage ranges to serialize. #[pyfn(m, "coverage_to_json")] fn coverage_to_json(py: Python, ranges: PyReadonlyArray2<u64>) -> PyResult<PyObject> { let ranges = ranges.as_array().to_owned(); let coverage = coverage::create_hpx_ranges_from_py_unchecked(ranges); let result = coverage::to_json(py, coverage)?; Ok(result.to_object(py)) } /// Serialize a spatial MOC into an FITS file. 
/// /// # Arguments /// /// * `depth``` - The depth of the MOC (needed to support the case in which there is no cell /// at the deepest level, in which case the computed depth will not be deep enough) /// * ``ranges`` - The list of time ranges to serialize. /// * ``path`` - The file path #[pyfn(m, "spatial_moc_to_fits_file")] fn spatial_moc_to_fits_file( depth: u8, ranges: PyReadonlyArray2<u64>, path: String, ) -> PyResult<()> { let ranges = ranges.as_array().to_owned(); let ranges = coverage::create_hpx_ranges_from_py_unchecked(ranges); spatial_coverage::to_fits_file(depth, ranges, path) .map_err(|e| exceptions::PyIOError::new_err(e.to_string())) } /// Serialize a spatial MOC into an ASCII file. /// /// # Arguments /// /// * `depth``` - The depth of the MOC (needed to support the case in which there is no cell /// at the deepest level, in which case the computed depth will not be deep enough) /// * ``ranges`` - The list of time ranges to serialize. /// * ``path`` - The file path #[pyfn(m, "spatial_moc_to_ascii_file")] fn spatial_moc_to_ascii_file( depth: u8, ranges: PyReadonlyArray2<u64>, path: String, ) -> PyResult<()> { let ranges = ranges.as_array().to_owned(); let ranges = coverage::create_hpx_ranges_from_py_unchecked(ranges); spatial_coverage::to_ascii_file(depth, ranges, path) .map_err(exceptions::PyIOError::new_err) } /// Serialize a spatial MOC into a ASCII string. /// /// # Arguments /// /// * `depth``` - The depth of the MOC (needed to support the case in which there is no cell /// at the deepest level, in which case the computed depth will not be deep enough) /// * ``ranges`` - The list of time ranges to serialize. #[pyfn(m, "spatial_moc_to_ascii_str")] fn spatial_moc_to_ascii_str( py: Python, depth: u8, ranges: PyReadonlyArray2<u64>, ) -> Py<PyString> { let ranges = ranges.as_array().to_owned(); let ranges = coverage::create_hpx_ranges_from_py_unchecked(ranges); PyString::new(py,&spatial_coverage::to_ascii_str(depth, ranges)).into() } /// Serialize a spatial MOC into a JSON file. /// /// # Arguments /// /// * `depth``` - The depth of the MOC (needed to support the case in which there is no cell /// at the deepest level, in which case the computed depth will not be deep enough) /// * ``ranges`` - The list of time ranges to serialize. /// * ``path`` - The file path #[pyfn(m, "spatial_moc_to_json_file")] fn spatial_moc_to_json_file( depth: u8, ranges: PyReadonlyArray2<u64>, path: String, ) -> PyResult<()> { let ranges = ranges.as_array().to_owned(); let ranges = coverage::create_hpx_ranges_from_py_unchecked(ranges); spatial_coverage::to_json_file(depth, ranges, path) .map_err(exceptions::PyIOError::new_err) } /// Serialize a spatial MOC into a JSON string. /// /// # Arguments /// /// * `depth``` - The depth of the MOC (needed to support the case in which there is no cell /// at the deepest level, in which case the computed depth will not be deep enough) /// * ``ranges`` - The list of time ranges to serialize. #[pyfn(m, "spatial_moc_to_json_str")] fn spatial_moc_to_json_str( py: Python, depth: u8, ranges: PyReadonlyArray2<u64>, ) -> Py<PyString> { let ranges = ranges.as_array().to_owned(); let ranges = coverage::create_hpx_ranges_from_py_unchecked(ranges); PyString::new(py,&spatial_coverage::to_json_str(depth, ranges)).into() } /// Create a 1D spatial coverage from the deserialization of a FITS file containing a multi-order map. /// /// The coverage computed contains the cells summing from ``cumul_from`` to ``cumul_to``. 
/// /// # Arguments /// /// * ``cumul_from`` - The cumulative value from which cells are put in the coverage /// * ``cumul_to`` - The cumulative value to which cells are put in the coverage /// * ``max_depth`` - the largest depth of the output MOC, which must be larger or equals to the largest /// depth in the `uniq` values /// * `asc`: cumulative value computed from lower to highest densities instead of from highest to lowest /// * `strict`: (sub-)cells overlapping the `cumul_from` or `cumul_to` values are not added /// * `no_split`: cells overlapping the `cumul_from` or `cumul_to` values are not recursively split /// * `reverse_decent`: perform the recursive decent from the highest cell number to the lowest (to be compatible with Aladin) /// /// # Info /// /// We expect the FITS file to be a BINTABLE containing a multi-order map. /// In this non-flexible approach, we expect the BINTABLE extension to contains: /// /// ```bash /// XTENSION= 'BINTABLE' / binary table extension /// BITPIX = 8 / array data type /// NAXIS = 2 / number of array dimensions /// AXIS1 = ?? / length of dimension 1 /// NAXIS2 = ?? / length of dimension 2 /// PCOUNT = 0 / number of group parameters /// GCOUNT = 1 / number of groups /// TFIELDS = xx / number of table fields /// TTYPE1 = 'UNIQ ' /// TFORM1 = 'K ' /// TTYPE2 = 'PROBDENSITY' /// TFORM2 = 'D ' /// TUNIT2 = 'sr-1 ' /// ... /// MOC = T /// PIXTYPE = 'HEALPIX ' / HEALPIX pixelisation /// ORDERING= 'NUNIQ ' / Pixel ordering scheme: RING, NESTED, or NUNIQ /// COORDSYS= 'C ' / Ecliptic, Galactic or Celestial (equatorial) /// MOCORDER= xx / MOC resolution (best order) /// ... /// END /// ``` #[pyfn(m, "spatial_moc_from_multiordermap_fits_file")] fn spatial_moc_from_multiordermap_fits_file( py: Python, path: String, cumul_from: f64, cumul_to: f64, asc: bool, strict: bool, no_split: bool, reverse_decent: bool ) -> PyResult<Py<PyArray2<u64>>> { use std::fs::File; use std::io::BufReader; use moc::deser::fits; let file = File::open(&path)?; let reader = BufReader::new(file); let ranges = fits::multiordermap::from_fits_multiordermap( reader, cumul_from, cumul_to, asc, strict, no_split, reverse_decent ).map_err(|e| exceptions::PyIOError::new_err(e.to_string()))?; let result: Array2<u64> = mocranges_to_array2(ranges.into_moc_ranges()); Ok(result.into_pyarray(py).to_owned()) } /// Deserialize a spatial MOC from a FITS file. /// /// # Arguments /// /// * ``path`` - The file path #[pyfn(m, "spatial_moc_from_fits_file")] fn spatial_moc_from_fits_file(py: Python, path: String) -> PyResult<Py<PyArray2<u64>>> { let ranges = spatial_coverage::from_fits_file(path)?; let result: Array2<u64> = mocranges_to_array2(ranges); Ok(result.into_pyarray(py).to_owned()) } /// Deserialize a spatial MOC from an ASCII file. /// /// # Arguments /// /// * ``path`` - The file path #[pyfn(m, "spatial_moc_from_ascii_file")] fn spatial_moc_from_ascii_file(py: Python, path: String) -> PyResult<Py<PyArray2<u64>>> { let ranges = spatial_coverage::from_ascii_file(path)?; let result: Array2<u64> = mocranges_to_array2(ranges); Ok(result.into_pyarray(py).to_owned()) } /// Deserialize a spatial MOC from a ASCII string. /// /// # Arguments /// /// * ``ascii`` - The json string #[pyfn(m, "spatial_moc_from_ascii_str")] fn spatial_moc_from_ascii_str(py: Python, ascii: String) -> PyResult<Py<PyArray2<u64>>> { let ranges = spatial_coverage::from_ascii_str(ascii)?; let result: Array2<u64> = mocranges_to_array2(ranges); Ok(result.into_pyarray(py).to_owned()) } /// Deserialize a spatial MOC from a JSON file. 
/// /// # Arguments /// /// * ``path`` - The file path #[pyfn(m, "spatial_moc_from_json_file")] fn spatial_moc_from_json_file(py: Python, path: String) -> PyResult<Py<PyArray2<u64>>> { let ranges = spatial_coverage::from_json_file(path)?; let result: Array2<u64> = mocranges_to_array2(ranges); Ok(result.into_pyarray(py).to_owned()) } /// Deserialize a spatial MOC from a JSON string. /// /// # Arguments /// /// * ``json`` - The json string #[pyfn(m, "spatial_moc_from_json_str")] fn spatial_moc_from_json_str(py: Python, json: String) -> PyResult<Py<PyArray2<u64>>> { let ranges = spatial_coverage::from_json_str(json)?; let result: Array2<u64> = mocranges_to_array2(ranges); Ok(result.into_pyarray(py).to_owned()) } /// Serialize a time MOC into a FITS file. /// /// # Arguments /// /// * `depth``` - The depth of the MOC (needed to support the case in which there is no cell /// at the deepest level, in which case the computed depth will not be deep enough) /// * ``ranges`` - The list of time ranges to serialize. /// * ``path`` - The file path #[pyfn(m, "time_moc_to_fits_file")] fn time_moc_to_fits_file( depth: u8, ranges: PyReadonlyArray2<u64>, path: String, ) -> PyResult<()> { let ranges = ranges.as_array().to_owned(); let ranges = coverage::create_time_ranges_from_py_uncheked(ranges); temporal_coverage::to_fits_file(depth, ranges, path) .map_err(|e| exceptions::PyIOError::new_err(e.to_string())) } /// Serialize a time MOC into an ASCII file. /// /// # Arguments /// /// * `depth``` - The depth of the MOC (needed to support the case in which there is no cell /// at the deepest level, in which case the computed depth will not be deep enough) /// * ``ranges`` - The list of time ranges to serialize. /// * ``path`` - The file path #[pyfn(m, "time_moc_to_ascii_file")] fn time_moc_to_ascii_file( depth: u8, ranges: PyReadonlyArray2<u64>, path: String, ) -> PyResult<()> { let ranges = ranges.as_array().to_owned(); let ranges = coverage::create_time_ranges_from_py_uncheked(ranges); temporal_coverage::to_ascii_file(depth, ranges, path) .map_err(exceptions::PyIOError::new_err) } /// Serialize a time MOC into a ASCII string. /// /// # Arguments /// /// * `depth``` - The depth of the MOC (needed to support the case in which there is no cell /// at the deepest level, in which case the computed depth will not be deep enough) /// * ``ranges`` - The list of time ranges to serialize. #[pyfn(m, "time_moc_to_ascii_str")] fn time_moc_to_ascii_str( py: Python, depth: u8, ranges: PyReadonlyArray2<u64>, ) -> Py<PyString> { let ranges = ranges.as_array().to_owned(); let ranges = coverage::create_time_ranges_from_py_uncheked(ranges); PyString::new(py,&temporal_coverage::to_ascii_str(depth, ranges)).into() } /// Serialize a time MOC into a JSON file. /// /// # Arguments /// /// * `depth``` - The depth of the MOC (needed to support the case in which there is no cell /// at the deepest level, in which case the computed depth will not be deep enough) /// * ``ranges`` - The list of time ranges to serialize. /// * ``path`` - The file path #[pyfn(m, "time_moc_to_json_file")] fn time_moc_to_json_file( depth: u8, ranges: PyReadonlyArray2<u64>, path: String, ) -> PyResult<()> { let ranges = ranges.as_array().to_owned(); let ranges = coverage::create_time_ranges_from_py_uncheked(ranges); temporal_coverage::to_json_file(depth, ranges, path) .map_err(exceptions::PyIOError::new_err) } /// Serialize a time MOC into a JSON string. 
/// /// # Arguments /// /// * `depth``` - The depth of the MOC (needed to support the case in which there is no cell /// at the deepest level, in which case the computed depth will not be deep enough) /// * ``ranges`` - The list of time ranges to serialize. #[pyfn(m, "time_moc_to_json_str")] fn time_moc_to_json_str( py: Python, depth: u8, ranges: PyReadonlyArray2<u64>, ) -> Py<PyString> { let ranges = ranges.as_array().to_owned(); let ranges = coverage::create_time_ranges_from_py_uncheked(ranges); PyString::new(py,&temporal_coverage::to_json_str(depth, ranges)).into() } /// Deserialize a time MOC from a FITS file. /// /// # Arguments /// /// * ``path`` - The file path #[pyfn(m, "time_moc_from_fits_file")] fn time_moc_from_fits_file(py: Python, path: String) -> PyResult<Py<PyArray2<u64>>> { let ranges = temporal_coverage::from_fits_file(path)?; let result: Array2<u64> = mocranges_to_array2(ranges); Ok(result.into_pyarray(py).to_owned()) } /// Deserialize a time MOC from an ASCII file. /// /// # Arguments /// /// * ``path`` - The file path #[pyfn(m, "time_moc_from_ascii_file")] fn time_moc_from_ascii_file(py: Python, path: String) -> PyResult<Py<PyArray2<u64>>> { let ranges = temporal_coverage::from_ascii_file(path)?; let result: Array2<u64> = mocranges_to_array2(ranges); Ok(result.into_pyarray(py).to_owned()) } /// Deserialize a time MOC from a ASCII string. /// /// # Arguments /// /// * ``ascii`` - The json string #[pyfn(m, "time_moc_from_ascii_str")] fn time_moc_from_ascii_str(py: Python, ascii: String) -> PyResult<Py<PyArray2<u64>>> { let ranges = temporal_coverage::from_ascii_str(ascii)?; let result: Array2<u64> = mocranges_to_array2(ranges); Ok(result.into_pyarray(py).to_owned()) } /// Deserialize a time MOC from a JSON file. /// /// # Arguments /// /// * ``path`` - The file path #[pyfn(m, "time_moc_from_json_file")] fn time_moc_from_json_file(py: Python, path: String) -> PyResult<Py<PyArray2<u64>>> { let ranges = temporal_coverage::from_json_file(path)?; let result: Array2<u64> = mocranges_to_array2(ranges); Ok(result.into_pyarray(py).to_owned()) } /// Deserialize a time MOC from a JSON string. /// /// # Arguments /// /// * ``json`` - The json string #[pyfn(m, "time_moc_from_json_str")] fn time_moc_from_json_str(py: Python, json: String) -> PyResult<Py<PyArray2<u64>>> { let ranges = temporal_coverage::from_json_str(json)?; let result: Array2<u64> = mocranges_to_array2(ranges); Ok(result.into_pyarray(py).to_owned()) } /// Degrade a spatial coverage to a specific depth. /// /// # Arguments /// /// * ``ranges`` - The spatial coverage ranges to degrade. /// * ``depth`` - The depth to degrade the spatial coverage to. /// /// # Errors /// /// * ``depth`` is not comprised in `[0, Hpx::<T>::MAX_DEPTH] = [0, 29]` #[pyfn(m, "hpx_coverage_degrade")] fn hpx_coverage_degrade( py: Python, ranges: PyReadonlyArray2<u64>, depth: u8, ) -> PyResult<Py<PyArray2<u64>>> { coverage_degrade(py, ranges, depth, coverage::create_hpx_ranges_from_py_unchecked) } /// Expand the spatial coverage adding an external edge of max_depth pixels. /// /// # Arguments /// /// * ``max_depth`` - The MOC depth. /// * ``ranges`` - The spatial coverage ranges of max depth to be expanded. 
/// /// # Errors /// /// * ``depth`` is not comprised in `[0, Hpx::<T>::MAX_DEPTH] = [0, 29]` #[pyfn(m, "hpx_coverage_expand")] fn hpx_coverage_expand( py: Python, max_depth: u8, ranges: PyReadonlyArray2<u64>, ) -> PyResult<Py<PyArray2<u64>>> { let ranges = ranges.as_array().to_owned(); let coverage = coverage::create_hpx_ranges_from_py_unchecked(ranges); let result = spatial_coverage::expand(max_depth, coverage); let result = mocranges_to_array2(result); Ok(result.into_pyarray(py).to_owned()) } /// Contract the spatial coverage removing an internal edge of max_depth pixels. /// /// # Arguments /// /// * ``max_depth`` - The MOC depth. /// * ``ranges`` - The spatial coverage ranges of max depth to be contracted. /// /// # Errors /// /// * ``depth`` is not comprised in `[0, Hpx::<T>::MAX_DEPTH] = [0, 29]` #[pyfn(m, "hpx_coverage_contract")] fn hpx_coverage_contract( py: Python, max_depth: u8, ranges: PyReadonlyArray2<u64>, ) -> PyResult<Py<PyArray2<u64>>> { let ranges = ranges.as_array().to_owned(); let coverage = coverage::create_hpx_ranges_from_py_unchecked(ranges); let result = spatial_coverage::contract(max_depth, coverage); let result = mocranges_to_array2(result); Ok(result.into_pyarray(py).to_owned()) } /// Count the number of disjoint MOC this MOC contains. /// /// # Arguments /// /// * ``max_depth`` - The MOC depth. /// * ``include_indirect_neighbours`` - /// if `false`, only consider cells having a common edge as been part of a same MOC /// if `true`, also consider cells having a common vertex as been part of the same MOC /// * ``ranges`` - The spatial coverage ranges of max depth to be split. /// /// # Errors /// /// * ``depth`` is not comprised in `[0, Hpx::<T>::MAX_DEPTH] = [0, 29]` #[pyfn(m, "hpx_coverage_split_count")] fn hpx_coverage_split_count( max_depth: u8, include_indirect_neighbours: bool, ranges: PyReadonlyArray2<u64>, ) -> u32 { let ranges = ranges.as_array().to_owned(); let coverage = coverage::create_hpx_ranges_from_py_unchecked(ranges); let moc = RangeMOC::<u64, Hpx<u64>>::new(max_depth, coverage); moc.split_into_joint_mocs(include_indirect_neighbours).len() as u32 } /// Split the input MOC into disjoint MOCs. /// /// # Arguments /// /// * ``max_depth`` - The MOC depth. /// * ``include_indirect_neighbours`` - /// if `false`, only consider cells having a common edge as been part of a same MOC /// if `true`, also consider cells having a common vertex as been part of the same MOC /// * ``ranges`` - The spatial coverage ranges of max depth to be split. /// /// # Errors /// /// * ``depth`` is not comprised in `[0, Hpx::<T>::MAX_DEPTH] = [0, 29]` #[pyfn(m, "hpx_coverage_split")] fn hpx_coverage_split( py: Python, max_depth: u8, include_indirect_neighbours: bool, ranges: PyReadonlyArray2<u64>, ) -> PyResult<Py<PyList>> { let ranges = ranges.as_array().to_owned(); let coverage = coverage::create_hpx_ranges_from_py_unchecked(ranges); let moc = RangeMOC::<u64, Hpx<u64>>::new(max_depth, coverage); let mocs: Vec<Py<PyArray2<u64>>> = moc.split_into_joint_mocs(include_indirect_neighbours) .drain(..) .map(|cell_moc| vec_range_to_array2( cell_moc.into_cell_moc_iter().ranges().collect() ).into_pyarray(py).to_owned().into() ).collect(); PyList::new(py, mocs).extract() } /// Degrade a time coverage to a specific depth. /// /// # Arguments /// /// * ``ranges`` - The time coverage ranges to degrade. /// * ``depth`` - The depth to degrade the time coverage to. 
/// /// # Errors /// /// * ``depth`` is not comprised in `[0, Time::<T>::MAX_DEPTH] = [0, 62]` #[pyfn(m, "time_coverage_degrade")] fn time_coverage_degrade( py: Python, ranges: PyReadonlyArray2<u64>, depth: u8, ) -> PyResult<Py<PyArray2<u64>>> { coverage_degrade(py, ranges, depth, coverage::create_time_ranges_from_py_uncheked) } /// Make a generic coverage consistent /// /// # Infos /// /// This is an internal method whose purpose is not to be called /// by an user. It is called inside of the `mocpy.IntervalSet` class. /// /// # Arguments /// /// * ``ranges`` - The coverage ranges to make consistent. #[pyfn(m, "coverage_merge_gen_intervals")] fn coverage_merge_gen_intervals( py: Python, ranges: PyReadonlyArray2<u64> ) -> PyResult<Py<PyArray2<u64>>> { let ranges = ranges.as_array().to_owned(); let coverage = coverage::build_ranges_from_py(ranges); let result: Array2<u64> = ranges_to_array2(coverage); Ok(result.into_pyarray(py).to_owned()) } /// Make a spatial coverage consistent /// /// # Infos /// /// This is an internal method whose purpose is not to be called /// by an user. It is called inside of the `mocpy.IntervalSet` class. /// /// # Arguments /// /// * ``ranges`` - The spatial coverage ranges to make consistent. /// * ``min_depth`` - A minimum depth. This argument is optional. /// A min depth means that we do not want any HEALPix cells to be /// of depth < to ``min_depth``. This argument is used for example for /// plotting a spatial coverage. All HEALPix cells of depth < 2 are splitted /// into cells of depth 2. /// /// # Errors /// /// * ``min_depth`` is not comprised in `[0, Hpx::<T>::MAX_DEPTH] = [0, 29]` #[pyfn(m, "coverage_merge_hpx_intervals")] fn coverage_merge_hpx_intervals( py: Python, ranges: PyReadonlyArray2<u64>, min_depth: i8, ) -> PyResult<Py<PyArray2<u64>>> { coverage_merge_intervals(py, ranges, min_depth, coverage::build_hpx_ranges_from_py) } /// Make a time coverage consistent /// /// # Infos /// /// This is an internal method whose purpose is not to be called /// by an user. It is called inside of the `mocpy.IntervalSet` class. /// /// # Arguments /// /// * ``ranges`` - The time coverage ranges to make consistent. /// * ``min_depth`` - A minimum depth. This argument is optional. /// A min depth means that we do not want any cells to be /// of depth < to ``min_depth``. This argument is used for example for /// plotting a time coverage. All time cells of depth < 2 are splitted /// into cells of depth 2. /// /// # Errors /// /// * ``min_depth`` is not comprised in `[0, Time::<T>::MAX_DEPTH] = [0, 62]` #[pyfn(m, "coverage_merge_time_intervals")] fn coverage_merge_time_intervals( py: Python, ranges: PyReadonlyArray2<u64>, min_depth: i8, ) -> PyResult<Py<PyArray2<u64>>> { coverage_merge_intervals(py, ranges, min_depth, coverage::build_time_ranges_from_py) } /// Compute the depth of a spatial coverage /// /// # Arguments /// /// * ``ranges`` - The input coverage. #[pyfn(m, "hpx_coverage_depth")] fn hpx_coverage_depth(py: Python, ranges: PyReadonlyArray2<u64>) -> u8 { coverage_depth(py, ranges, coverage::create_hpx_ranges_from_py_unchecked) } /// Compute the depth of a time coverage /// /// # Arguments /// /// * ``ranges`` - The input coverage. 
#[pyfn(m, "time_coverage_depth")] fn time_coverage_depth(py: Python, ranges: PyReadonlyArray2<u64>) -> u8 { coverage_depth(py, ranges, coverage::create_time_ranges_from_py_uncheked) } fn coverage_depth<Q, F>(_py: Python, ranges: PyReadonlyArray2<u64>, to_moc_ranges: F) -> u8 where Q: MocQty<u64>, F: Fn(Array<u64, Ix2>) -> MocRanges<u64, Q> { let ranges = ranges.as_array().to_owned(); let coverage = to_moc_ranges(ranges); coverage::depth(&coverage) } /// Compute the sky fraction of a spatial coverage /// /// # Arguments /// /// * ``coverage`` - The spatial coverage /// * ``max_depth`` - The max depth of the spatial coverage. #[pyfn(m, "coverage_sky_fraction")] fn coverage_sky_fraction(_py: Python, ranges: PyReadonlyArray2<u64>) -> f32 { let ranges = ranges.as_array().to_owned(); coverage::sky_fraction(&ranges) } /// Convert HEALPix cell indices from the **uniq** to the **nested** format. /// /// # Arguments /// /// * ``ranges`` - The HEALPix cells defined in the **uniq** format. #[pyfn(m, "to_nested")] fn to_nested(py: Python, ranges: PyReadonlyArray1<u64>) -> Py<PyArray2<u64>> { let ranges = ranges.as_array().to_owned(); let result: Array2<u64> = if ranges.is_empty() { Array::zeros((0, 2)) } else { let shape = (ranges.shape()[0], 1); let start = ranges.into_shape(shape).unwrap(); let mut ranges: Vec<Range<u64>> = Vec::with_capacity(start.len()); for uniq in start { ranges.push(uniq..uniq + 1) } ranges.sort_by(|a, b| a.start.cmp(&b.start)); let nested_coverage = spatial_coverage::to_nested(HpxUniqRanges::new_unchecked(ranges)); mocranges_to_array2(nested_coverage) }; result.into_pyarray(py).to_owned() } /// Convert HEALPix cell indices from the **nested** to the **uniq** format. /// /// # Arguments /// /// * ``ranges`` - The HEALPix cells defined in the **nested** format. #[pyfn(m, "to_uniq")] fn to_uniq(py: Python, ranges: PyReadonlyArray2<u64>) -> Py<PyArray1<u64>> { use moc::moc::range::RangeMOC; use moc::moc::{RangeMOCIterator, RangeMOCIntoIterator}; let ranges = ranges.as_array().to_owned(); let result: Array1<u64> = if ranges.is_empty() { Array::zeros((0,)) } else { let nested_coverage = coverage::create_hpx_ranges_from_py_unchecked(ranges); // let uniq_coverage = nested_coverage.into_hpx_uniq(); // uniq_ranges_to_array1(uniq_coverage) let mut v: Vec<u64> = RangeMOC::new(29, nested_coverage).into_range_moc_iter() .cells() .map(|cell| cell.uniq_hpx()) .collect(); v.sort_unstable(); v.into() }; result.into_pyarray(py).to_owned() } /// Create a temporal coverage from a list of time ranges expressed in jd. /// /// # Arguments /// /// * ``min_times`` - The list of inf bounds of the time ranges expressed in **jd** /// * ``max_times`` - The list of sup bounds of the time ranges expressed in **jd** /// /// # WARNING /// * using `f64`, it is not precise to the microsecond, /// use `from_time_ranges_in_microsec_since_jd_origin` instead. /// /// /// # Errors /// /// * If the number of ``min_times`` and ``max_times`` do not match. #[pyfn(m, "from_time_ranges")] fn from_time_ranges( py: Python, min_times: PyReadonlyArray1<f64>, max_times: PyReadonlyArray1<f64>, ) -> PyResult<Py<PyArray2<u64>>> { let min_times = min_times.as_array().to_owned(); let max_times = max_times.as_array().to_owned(); let coverage: Array2<u64> = temporal_coverage::from_time_ranges(min_times, max_times)?; Ok(coverage.into_pyarray(py).to_owned()) } /// Create a temporal coverage from a list of time ranges expressed in microseconds since /// jd origin. 
/// /// # Arguments /// /// * ``min_times`` - The list of inf bounds of the time ranges expressed in microseconds since /// jd origin. /// * ``max_times`` - The list of sup bounds of the time ranges expressed in microseconds since /// jd origin. /// /// # Errors /// /// * If the number of ``min_times`` and ``max_times`` do not match. #[pyfn(m, "from_time_ranges_in_microsec_since_jd_origin")] fn from_time_ranges_in_microsec_since_jd_origin( py: Python, min_times: PyReadonlyArray1<u64>, max_times: PyReadonlyArray1<u64>, ) -> PyResult<Py<PyArray2<u64>>> { let min_times = min_times.as_array().to_owned(); let max_times = max_times.as_array().to_owned(); let coverage: Array2<u64> = temporal_coverage::from_time_ranges_in_microsec_since_jd_origin(min_times, max_times)?; Ok(coverage.into_pyarray(py).to_owned()) } /// Flatten HEALPix cells to a specific depth /// /// # Arguments /// /// * ``data`` - The spatial coverage /// * ``depth`` - The depth to flatten the coverage to. #[pyfn(m, "flatten_pixels")] fn flatten_hpx_pixels(py: Python, data: PyReadonlyArray2<u64>, depth: u8) -> Py<PyArray1<u64>> { let data = data.as_array().to_owned(); let result = coverage::flatten_hpx_pixels(data, depth); result.into_pyarray(py).to_owned() } /// Create a spatial coverage from a list of HEALPix cell indices. /// /// # Arguments /// /// * ``pixels`` - A set of HEALPix cell indices /// * ``depth`` - The depths of each HEALPix cell indices /// /// # Precondition /// /// ``pixels`` and ``depth`` must be valid. This means that: /// /// * ``depth`` contains values in the range `[0, <T>::MAXDEPTH] = [0, 29]` /// * ``pixels`` contains values in the range `[0, 12*4**(depth)]` /// /// # Errors /// /// * ``depth`` and ``pixels`` have not the same length. #[pyfn(m, "from_healpix_cells")] fn from_healpix_cells( py: Python, pixels: PyReadonlyArray1<u64>, depth: PyReadonlyArray1<u8>, ) -> PyResult<Py<PyArray2<u64>>> { let pixels = pixels.as_array().to_owned(); let depth = depth.as_array().to_owned(); let result = spatial_coverage::from_healpix_cells(pixels, depth)?; Ok(result.into_pyarray(py).to_owned()) } /// Create a spatial coverage from an HEALPix map, i.e. from a list of HEALPix cell indices /// at the same depth. /// /// # Arguments /// /// * ``pixels`` - A set of HEALPix cell indices /// * ``depth`` - The depths of each HEALPix cell indices /// /// # Precondition /// /// * ``depth`` is a value in the range `[0, <T>::MAXDEPTH] = [0, 29]` /// * ``pixels`` contains values in the range `[0, 12*4**(depth)]` #[pyfn(m, "from_healpix_cells")] fn from_healpix_map( py: Python, pixels: PyReadonlyArray1<u64>, depth: PyReadonlyArray1<u8>, ) -> PyResult<Py<PyArray2<u64>>> { let pixels = pixels.as_array().to_owned(); let depth = depth.as_array().to_owned(); let result = spatial_coverage::from_healpix_cells(pixels, depth)?; Ok(result.into_pyarray(py).to_owned()) } /// Create a spatial coverage from a given ring. 
/// /// # Arguments /// /// * ``ra_deg`` - longitude of the center of the ring, in degrees /// * ``dec_deg`` - latitude of the center of the ring, in degrees /// * ``r_int_deg`` - Internal radius of the ring, in degrees /// * ``r_ext_deg`` - External radius of the ring, in degrees /// * ``depth`` - The depths of the expected MOC /// * ``delta_depth`` - parameter controlling the approximation (typical value: 2) /// /// # Errors /// /// If one of the following conditions is not met: /// /// * ``depth`` contains values in the range `[0, <T>::MAXDEPTH] = [0, 29]` /// * ``r_int_deg`` contains values in the range `[0, 180]` /// * ``r_ext_deg`` contains values in the range `[0, 180]` /// * ``r_ext_deg > r_int_deg`` /// #[pyfn(m, "from_ring")] fn from_ring( py: Python, lon_deg: f64, lat_deg: f64, r_int_deg: f64, r_ext_deg: f64, depth: u8, delta_depth: u8, ) -> PyResult<Py<PyArray2<u64>>> { let moc_ranges = RangeMOC::from_ring( lon_deg.to_radians(), lat_deg.to_radians(), r_int_deg.to_radians(), r_ext_deg.to_radians(), depth, delta_depth ).into_moc_ranges(); let result = mocranges_to_array2(moc_ranges); Ok(result.into_pyarray(py).to_owned()) } Ok(()) }
{ /*let ranges_a = a.as_array().to_owned(); let ranges_b = b.as_array().to_owned(); let cov_a = coverage::create_ranges_from_py_unchecked(ranges_a); let cov_b = coverage::create_ranges_from_py_unchecked(ranges_b); let result = cov_a.union(&cov_b); let result: Array2<u64> = result.into(); result.to_owned().into_pyarray(py).to_owned()*/ coverage_op(py, a, b, |cov_a, cov_b| cov_a.union(&cov_b)) }
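The Time-Space (2D) functions above never hand a coverage object back to Python: each call looks its operands up in the global `COVERAGES_2D` map by an integer key, and only that key crosses the FFI boundary (presumably to avoid converting the nested time/space ranges through NumPy on every operation). Below is a minimal Python sketch of the call sequence the doc comments describe for the `mocpy.STMOC` wrapper; the module name `core` and the file paths are assumptions of mine, since the actual PyO3 module `m` is declared outside this excerpt. The function names and argument orders are the ones registered with `#[pyfn(m, ...)]` above.

```python
import core  # hypothetical name for the compiled extension module

# Constructor side: reserve slots in COVERAGES_2D and keep their keys.
idx_a = core.create_2d_coverage()
idx_b = core.create_2d_coverage()

# Fill the slots, e.g. from FITS files (paths are made up for the example).
core.coverage_2d_from_fits_file(idx_a, "stmoc_a.fits")
core.coverage_2d_from_fits_file(idx_b, "stmoc_b.fits")

# Binary operations take a destination key plus the two operand keys.
idx_union = core.create_2d_coverage()
core.coverage_2d_union(idx_union, idx_a, idx_b)

print(core.coverage_2d_depth(idx_union))     # tuple of depths, (0, 0) if empty
print(core.coverage_2d_is_empty(idx_union))  # emptiness of the result

# Destructor side: release the slots (normally done for you when the
# mocpy.STMOC wrapper is garbage collected).
for idx in (idx_a, idx_b, idx_union):
    core.drop_2d_coverage(idx)
```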
dataItemReactionSpeed.ts
export class dataItemReactionSpeed { private _datetimeMyLastMessage: Date; private _datetimeTheirResponse: Date; private _datetimebetween: Date; constructor(datetimeMyLastMessage: string, datetimeTheirResponse: string){ this._datetimeMyLastMessage = new Date(datetimeMyLastMessage);
        this._datetimeTheirResponse = new Date(datetimeTheirResponse);
        this._datetimebetween = this.getDateTimeBetweenDates(this._datetimeMyLastMessage, this._datetimeTheirResponse);
    }

    private getDateTimeBetweenDates(datetimeMyLastMessage: Date, datetimeTheirResponse: Date): Date {
        // Elapsed time between my last message and their response,
        // stored as a Date offset from the epoch.
        return new Date(datetimeTheirResponse.getTime() - datetimeMyLastMessage.getTime());
    }
}
hash.go
package hash

// StringI64 hashes a string with the classic 31-based polynomial rolling hash
// and masks off the sign bit so the result is always non-negative.
func StringI64(str string) int64 {
	h := int64(0)
	for i := 0; i < len(str); i++ {
		v := str[i]
		h = 31*h + int64(v&0xff)
	}
	return h & 0x7fffffffffffffff
}

type elem struct {
	key string
	val interface{}
}

// HashMap is a simple separate-chaining hash map keyed by strings.
// cap is always a power of two, so (cap - 1) can be used as a bit mask.
type HashMap struct {
	cap    int64
	Bucket [][]*elem
}

func newHashMap(cap int) *HashMap {
	cap = mod(cap)
	h := &HashMap{
		cap:    int64(cap),
		Bucket: make([][]*elem, cap),
	}
	return h
}

func (h *HashMap) Set(key string, v interface{}) {
	// Mask with cap-1 (cap is a power of two) so the index stays inside Bucket.
	idx := StringI64(key) & (h.cap - 1)
	for _, val := range h.Bucket[idx] {
		if val.key == key
	}
	h.Bucket[idx] = append(h.Bucket[idx], &elem{
		key: key,
		val: v,
	})
}

func (h *HashMap) Get(key string) interface{} {
	idx := StringI64(key) & (h.cap - 1)
	for _, e := range h.Bucket[idx] {
		if e.key == key {
			return e.val
		}
	}
	return nil
}

// mod rounds i down to the nearest power of two (minimum 1).
func mod(i int) int {
	s := 0
	for i > 1 {
		i /= 2
		s++
	}
	return 1 << s
}
{ val.val = v return }
record_result.py
from . import BaseResult class RecordResult(BaseResult):
@property def url(self): return self.component.url @property def duration(self): return self.component.duration @property def size(self): return self.component.size
alternative_compilation_test.py
# Copyright 2018 Braxton Mckee # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typed_python import * import typed_python._types as _types from nativepython.runtime import Runtime import unittest import time def Compiled(f): f = Function(f) return Runtime.singleton().compile(f) class TestAlternativeCompilation(unittest.TestCase): def test_simple_alternative_passing(self): Simple = Alternative("Simple", A={}, B={}, C={}) @Compiled def f(s: Simple): y = s return y self.assertEqual(f(Simple.A()), Simple.A()) self.assertEqual(f(Simple.B()), Simple.B()) self.assertEqual(f(Simple.C()), Simple.C()) def test_complex_alternative_passing(self): Complex = Alternative( "Complex", A={'a': str, 'b': int}, B={'a': str, 'c': int}, C={'a': str, 'd': lambda: Complex} ) c = Complex.A(a="hi", b=20) c2 = Complex.C(a="hi", d=c) @Compiled def f(c: Complex): y = c return y self.assertEqual(f(c), c) self.assertEqual(f(c2), c2) self.assertEqual(_types.refcount(c), 2) self.assertEqual(_types.refcount(c2), 1) def test_construct_alternative(self): A = Alternative("A", X={'x': int}) @Compiled def f(): return A.X(x=10) self.assertTrue(f().matches.X) self.assertEqual(f().x, 10) def test_alternative_matches(self): A = Alternative("A", X={'x': int}, Y={'x': int}) @Compiled def f(x: A): return x.matches.X self.assertTrue(f(A.X())) self.assertFalse(f(A.Y())) def test_alternative_member_homogenous(self): A = Alternative("A", X={'x': int}, Y={'x': int}) @Compiled def f(x: A): return x.x self.assertEqual(f(A.X(x=10)), 10) self.assertEqual(f(A.Y(x=10)), 10) def test_alternative_member_diverse(self): A = Alternative("A", X={'x': int}, Y={'x': float}) @Compiled def f(x: A): return x.x self.assertEqual(f(A.X(x=10)), 10) self.assertEqual(f(A.Y(x=10.5)), 10.5) def test_alternative_member_distinct(self): A = Alternative("A", X={'x': int}, Y={'y': float}) @Compiled def f(x: A): if x.matches.X: return x.x if x.matches.Y: return x.y self.assertEqual(f(A.X(x=10)), 10) self.assertEqual(f(A.Y(y=10.5)), 10.5) def test_matching_recursively(self): @TypeFunction def Tree(T): return Alternative( "Tree", Leaf={'value': T}, Node={'left': Tree(T), 'right': Tree(T)} ) def treeSum(x: Tree(int)): matches = x.matches.Leaf if matches: return x.value if x.matches.Node: return treeSum(x.left) + treeSum(x.right) return 0 def
(depth: int, offset: int) -> Tree(int): if depth > 0: return Tree(int).Node( left=buildTree(depth-1, offset), right=buildTree(depth-1, offset+1), ) return Tree(int).Leaf(value=offset) aTree = Compiled(buildTree)(15, 0) treeSumCompiled = Compiled(treeSum) t0 = time.time() sum = treeSum(aTree) t1 = time.time() sumCompiled = treeSumCompiled(aTree) t2 = time.time() self.assertEqual(sum, sumCompiled) speedup = (t1-t0)/(t2-t1) self.assertGreater(speedup, 20) # I get about 50
buildTree
mapImage.ts
import * as mongoose from 'mongoose'; const mapImageSchema = new mongoose.Schema({ name: String, topLeftX: Number, topLeftY: Number, topRightX: Number, topRightY: Number, bottomRightX: Number,
const MapImage = mongoose.model('MapImage', mapImageSchema); export default MapImage;
bottomRightY: Number, floor: Number }, { collection: 'mapImages' });
util.go
package util import ( "bytes" "encoding/json" "fmt" "io/ioutil" "net/http" ) var tgAlerter TgAlerter type TgAlerter struct { BotId string ChatId string } func InitTgAlerter(cfg AlertConfig) { tgAlerter = TgAlerter{ BotId: cfg.TelegramBotId, ChatId: cfg.TelegramChatId, } } type MessageInfo struct { MsgType string `json:"msgtype"` Text TextInfo `json:"text"` } type TextInfo struct { Content string `json:"content"` MentionedList []string `json:"mentioned_list"` MentionedMobileList []string `json:"mentioned_mobile_list"` } func
(msg string) {
	if tgAlerter.BotId == "" || tgAlerter.ChatId == "" || msg == "" {
		return
	}
	msg = fmt.Sprintf("eth-main-side-swap-backend alert: %s", msg)
	endPoint := fmt.Sprintf("https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=%s", tgAlerter.BotId)
	textInfo := TextInfo{
		Content:             msg,
		MentionedList:       []string{"@all"},
		MentionedMobileList: []string{"@all"},
	}
	postInfo := MessageInfo{
		MsgType: "text",
		Text:    textInfo,
	}
	jsonBytes, err := json.Marshal(postInfo)
	if err != nil {
		Logger.Errorf("send message error=%s", err)
		return
	}
	Logger.Infof("send tg message, msg=%s", msg)
	req, err := http.NewRequest("POST", endPoint, bytes.NewBuffer(jsonBytes))
	if err != nil {
		Logger.Errorf("send message error=%s", err)
		return
	}
	req.Header.Set("Content-Type", "application/json;charset=UTF-8")
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		Logger.Errorf("send message error=%s", err)
		return
	}
	// Close the response body even if reading it fails below.
	defer resp.Body.Close()
	resBytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		Logger.Errorf("send message error=%s", err)
		return
	}
	Logger.Infof("response, msg=%s", string(resBytes))
}
SendTelegramMessage
x_sheet_condition2.py
# coding: utf-8 # # Copyright 2022 :Barry-Thomas-Paul: Moss # # Licensed under the Apache License, Version 2.0 (the "License") # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http: // www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Interface Class # this is a auto generated file generated by Cheetah # Libre Office Version: 7.3 # Namespace: com.sun.star.sheet from typing import TYPE_CHECKING from ooo.oenv.env_const import UNO_ENVIRONMENT, UNO_RUNTIME _DYNAMIC = False if (not TYPE_CHECKING) and UNO_RUNTIME and UNO_ENVIRONMENT: _DYNAMIC = True if not TYPE_CHECKING and _DYNAMIC: from com.sun.star.sheet import XSheetCondition2 as XSheetCondition2 setattr(XSheetCondition2, '__ooo_ns__', 'com.sun.star.sheet') setattr(XSheetCondition2, '__ooo_full_ns__', 'com.sun.star.sheet.XSheetCondition2') setattr(XSheetCondition2, '__ooo_type_name__', 'interface') else:
__all__ = ['XSheetCondition2']
from ...lo.sheet.x_sheet_condition2 import XSheetCondition2 as XSheetCondition2
AlwaysOnTopWindowToggle.js
// @flow import React, { Component } from 'react'; import { connect } from 'react-redux'; import type { Dispatch } from 'redux'; import { setWindowAlwaysOnTop } from '../actions'; import ToggleWithLabel from './ToggleWithLabel'; type Props = { /** * Redux dispatch. */ dispatch: Dispatch<*>; /** * Window Always on Top value in (redux) state. */ _alwaysOnTopWindowEnabled: boolean; }; /** * Window always open on top placed in Settings Drawer. */ class
extends Component<Props> { /** * Initializes a new {@code AlwaysOnTopWindowToggle} instance. * * @inheritdoc */ constructor(props) { super(props); this._onAlwaysOnTopWindowToggleChange = this._onAlwaysOnTopWindowToggleChange.bind(this); } /** * Render function of component. * * @returns {ReactElement} */ render() { return ( <ToggleWithLabel isDefaultChecked = { this.props._alwaysOnTopWindowEnabled } label = 'Fenêtre toujours au-dessus' onChange = { this._onAlwaysOnTopWindowToggleChange } value = { this.props._alwaysOnTopWindowEnabled } /> ); } _onAlwaysOnTopWindowToggleChange: (*) => void; /** * Toggles alwaysOnTopWindowEnabled. * * @returns {void} */ _onAlwaysOnTopWindowToggleChange() { const { _alwaysOnTopWindowEnabled } = this.props; const newState = !_alwaysOnTopWindowEnabled; this.props.dispatch(setWindowAlwaysOnTop(newState)); } } /** * Maps (parts of) the redux state to the React props. * * @param {Object} state - The redux state. * @returns {{ * _alwaysOnTopWindowEnabled: boolean, * }} */ function _mapStateToProps(state: Object) { return { _alwaysOnTopWindowEnabled: state.settings.alwaysOnTopWindowEnabled }; } export default connect(_mapStateToProps)(AlwaysOnTopWindowToggle);
AlwaysOnTopWindowToggle
__init__.py
import subprocess, os, glob, GlobalUtils, InstallUtil, Platform from JsonUtil import * from menu import cmenu yes = set(["yes", "y"]) home = os.getenv("HOME") JSON_LOCATION = home + "/.instpakg" DEFAULT_JSON = JSON_LOCATION + "/DEFAULT.json" jsonInstall = "" markedInstall = [] markedRepo = [] markedCommand = [] def initJson(): global jsonInstall load_json(DEFAULT_JSON) if Platform.getPackage("apt"): jsonInstall = get_json("apt") elif Platform.getPackage("yum"): jsonInstall = get_json("yum") def bulkInstall(): initJson() for item in jsonInstall: InstallUtil.forceAddRepository(item["repo"]) InstallUtil.update() for item in root: if item["command"]: InstallUtil.call(item["command"]) InstallUtil.forceInstall(item["app"]) close_json() def mark(program, repo, command): markedInstall.append(program) if repo: choice = raw_input("Do you want to add ppa " + repo + " (Required to install " + program +") (y/n)").lower() if choice in yes: markedRepo.append(repo) else: print("Cancelled install of " + program) markedInstall.remove(program) elif command: choice = raw_input("The following command is required in order to install " + program + "are you sure? (y/n)\n\033[1m" + command + "\033[0m").lower() if choice in yes: markedCommand.append(command) else: print("Cancelled install of " + program) markedInstall.remove(program) def promptInstall():
def selectJSON(): global DEFAULT_JSON num = -1 GlobalUtils.clear() for file in os.listdir(JSON_LOCATION): if file.endswith(".json"): files = glob.glob(JSON_LOCATION+"/*.json") num += 1 print("["+str(num) + "] " + file) choice = raw_input("Choose one [0-"+str(num)+"] ") print(files[int(choice)]) DEFAULT_JSON = files[int(choice)] def main(): try: list = [{ "Install software": promptInstall }, {"Bulk Software Install": bulkInstall}, {"Select JSON file": selectJSON}, {"Exit": GlobalUtils.exit}] menu = cmenu(list, "InstPakg Menu") menu.display() except SystemExit: pass else: menu.cleanup()
GlobalUtils.clear() initJson() for item in jsonInstall: print(item["app"] + "\n-----------------\nINSERT DESCRIPTION!\n") choice = raw_input("Do you want to mark\033[1m " + item["app"] + "\033[0m for install? (y/n)").lower() if choice in yes: mark(item["app"], item["repo"], item["command"]) if markedCommand: choice = raw_input("The following code will now run, are you sure (y/n) \n" + str(markedCommand)).lower() if choice in yes: for item in markedCommand: InstallUtil.call(item) if markedRepo: choice = raw_input("The following repositories will be added, are you sure? (y/n)\n\033[1m" + str(markedRepo) + "\033[0m").lower() if choice in yes: for item in markedRepo: InstallUtil.addRepository(item) InstallUtil.update() else: print("No external repositories are required!") choice = raw_input("Are you sure you want to install the following programs? -\n " + str(markedInstall)) if choice in yes: for item in markedInstall: InstallUtil.install(item) close_json()
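Both `bulkInstall` and `promptInstall` drive everything from a JSON document selected per package manager: `get_json("apt")` or `get_json("yum")` returns a list of entries, and each entry is read through `item["app"]`, `item["repo"]` and `item["command"]`. The real schema is whatever `JsonUtil` expects, which is not shown in this file, so the snippet below is only a guessed example of what `~/.instpakg/DEFAULT.json` could look like; the package and PPA names are made up.

```python
import json

# Guessed structure for ~/.instpakg/DEFAULT.json, inferred from the lookups
# above (one top-level key per package manager, one dict per program).
example = {
    "apt": [
        {"app": "git", "repo": "", "command": ""},
        {
            "app": "example-app",          # hypothetical package name
            "repo": "ppa:example/ppa",     # passed to InstallUtil.addRepository()
            "command": "",                 # optional command run before the install
        },
    ],
    "yum": [
        {"app": "git", "repo": "", "command": ""},
    ],
}

print(json.dumps(example, indent=2))
```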
yolact.py
import torch, torchvision import torch.nn as nn import torch.nn.functional as F from torchvision.models.resnet import Bottleneck import numpy as np from itertools import product from math import sqrt from typing import List from collections import defaultdict from data.config import cfg, mask_type from layers import Detect from layers.interpolate import InterpolateModule from backbone import construct_backbone import torch.backends.cudnn as cudnn from utils import timer from utils.functions import MovingAverage, make_net # This is required for Pytorch 1.0.1 on Windows to initialize Cuda on some driver versions. # See the bug report here: https://github.com/pytorch/pytorch/issues/17108 torch.cuda.current_device() # As of March 10, 2019, Pytorch DataParallel still doesn't support JIT Script Modules use_jit = torch.cuda.device_count() <= 1 if not use_jit: print('Multiple GPUs detected! Turning off JIT.') ScriptModuleWrapper = torch.jit.ScriptModule if use_jit else nn.Module script_method_wrapper = torch.jit.script_method if use_jit else lambda fn, _rcn=None: fn class Concat(nn.Module): def __init__(self, nets, extra_params): super().__init__() self.nets = nn.ModuleList(nets) self.extra_params = extra_params def forward(self, x): # Concat each along the channel dimension return torch.cat([net(x) for net in self.nets], dim=1, **self.extra_params) prior_cache = defaultdict(lambda: None) class PredictionModule(nn.Module): """ The (c) prediction module adapted from DSSD: https://arxiv.org/pdf/1701.06659.pdf Note that this is slightly different to the module in the paper because the Bottleneck block actually has a 3x3 convolution in the middle instead of a 1x1 convolution. Though, I really can't be arsed to implement it myself, and, who knows, this might be better. Args: - in_channels: The input feature size. - out_channels: The output feature size (must be a multiple of 4). - aspect_ratios: A list of lists of priorbox aspect ratios (one list per scale). - scales: A list of priorbox scales relative to this layer's convsize. For instance: If this layer has convouts of size 30x30 for an image of size 600x600, the 'default' (scale of 1) for this layer would produce bounding boxes with an area of 20x20px. If the scale is .5 on the other hand, this layer would consider bounding boxes with area 10x10px, etc. - parent: If parent is a PredictionModule, this module will use all the layers from parent instead of from this module. 
""" def __init__(self, in_channels, out_channels=1024, aspect_ratios=[[1]], scales=[1], parent=None, index=0): super().__init__() self.num_classes = cfg.num_classes self.mask_dim = cfg.mask_dim # Defined by Yolact self.num_priors = sum(len(x)*len(scales) for x in aspect_ratios) self.parent = [parent] # Don't include this in the state dict self.index = index self.num_heads = cfg.num_heads # Defined by Yolact if cfg.mask_proto_split_prototypes_by_head and cfg.mask_type == mask_type.lincomb: self.mask_dim = self.mask_dim // self.num_heads if cfg.mask_proto_prototypes_as_features: in_channels += self.mask_dim if parent is None: if cfg.extra_head_net is None: out_channels = in_channels else: self.upfeature, out_channels = make_net(in_channels, cfg.extra_head_net) if cfg.use_prediction_module: self.block = Bottleneck(out_channels, out_channels // 4) self.conv = nn.Conv2d(out_channels, out_channels, kernel_size=1, bias=True) self.bn = nn.BatchNorm2d(out_channels) self.bbox_layer = nn.Conv2d(out_channels, self.num_priors * 4, **cfg.head_layer_params) self.conf_layer = nn.Conv2d(out_channels, self.num_priors * self.num_classes, **cfg.head_layer_params) self.mask_layer = nn.Conv2d(out_channels, self.num_priors * self.mask_dim, **cfg.head_layer_params) if cfg.use_mask_scoring: self.score_layer = nn.Conv2d(out_channels, self.num_priors, **cfg.head_layer_params) if cfg.use_instance_coeff: self.inst_layer = nn.Conv2d(out_channels, self.num_priors * cfg.num_instance_coeffs, **cfg.head_layer_params) # What is this ugly lambda doing in the middle of all this clean prediction module code? def make_extra(num_layers): if num_layers == 0: return lambda x: x else: # Looks more complicated than it is. This just creates an array of num_layers alternating conv-relu return nn.Sequential(*sum([[ nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1), nn.ReLU(inplace=True) ] for _ in range(num_layers)], [])) self.bbox_extra, self.conf_extra, self.mask_extra = [make_extra(x) for x in cfg.extra_layers] if cfg.mask_type == mask_type.lincomb and cfg.mask_proto_coeff_gate: self.gate_layer = nn.Conv2d(out_channels, self.num_priors * self.mask_dim, kernel_size=3, padding=1) self.aspect_ratios = aspect_ratios self.scales = scales self.priors = None self.last_conv_size = None self.last_img_size = None def forward(self, x): """ Args: - x: The convOut from a layer in the backbone network Size: [batch_size, in_channels, conv_h, conv_w]) Returns a tuple (bbox_coords, class_confs, mask_output, prior_boxes) with sizes - bbox_coords: [batch_size, conv_h*conv_w*num_priors, 4] - class_confs: [batch_size, conv_h*conv_w*num_priors, num_classes] - mask_output: [batch_size, conv_h*conv_w*num_priors, mask_dim] - prior_boxes: [conv_h*conv_w*num_priors, 4] """ # In case we want to use another module's layers src = self if self.parent[0] is None else self.parent[0] conv_h = x.size(2) conv_w = x.size(3) if cfg.extra_head_net is not None: x = src.upfeature(x) if cfg.use_prediction_module: # The two branches of PM design (c) a = src.block(x) b = src.conv(x) b = src.bn(b) b = F.relu(b) # TODO: Possibly switch this out for a product x = a + b bbox_x = src.bbox_extra(x) conf_x = src.conf_extra(x) mask_x = src.mask_extra(x) bbox = src.bbox_layer(bbox_x).permute(0, 2, 3, 1).contiguous().view(x.size(0), -1, 4) conf = src.conf_layer(conf_x).permute(0, 2, 3, 1).contiguous().view(x.size(0), -1, self.num_classes) if cfg.eval_mask_branch: mask = src.mask_layer(mask_x).permute(0, 2, 3, 1).contiguous().view(x.size(0), -1, self.mask_dim) else: mask = 
torch.zeros(x.size(0), bbox.size(1), self.mask_dim, device=bbox.device) if cfg.use_mask_scoring: score = src.score_layer(x).permute(0, 2, 3, 1).contiguous().view(x.size(0), -1, 1) if cfg.use_instance_coeff: inst = src.inst_layer(x).permute(0, 2, 3, 1).contiguous().view(x.size(0), -1, cfg.num_instance_coeffs) # See box_utils.decode for an explanation of this if cfg.use_yolo_regressors: bbox[:, :, :2] = torch.sigmoid(bbox[:, :, :2]) - 0.5 bbox[:, :, 0] /= conv_w bbox[:, :, 1] /= conv_h if cfg.eval_mask_branch: if cfg.mask_type == mask_type.direct: mask = torch.sigmoid(mask) elif cfg.mask_type == mask_type.lincomb: mask = cfg.mask_proto_coeff_activation(mask) if cfg.mask_proto_coeff_gate: gate = src.gate_layer(x).permute(0, 2, 3, 1).contiguous().view(x.size(0), -1, self.mask_dim) mask = mask * torch.sigmoid(gate) if cfg.mask_proto_split_prototypes_by_head and cfg.mask_type == mask_type.lincomb: mask = F.pad(mask, (self.index * self.mask_dim, (self.num_heads - self.index - 1) * self.mask_dim), mode='constant', value=0) priors = self.make_priors(conv_h, conv_w, x.device) preds = { 'loc': bbox, 'conf': conf, 'mask': mask, 'priors': priors } if cfg.use_mask_scoring: preds['score'] = score if cfg.use_instance_coeff: preds['inst'] = inst return preds def make_priors(self, conv_h, conv_w, device): """ Note that priors are [x,y,width,height] where (x,y) is the center of the box. """ global prior_cache size = (conv_h, conv_w) with timer.env('makepriors'): if self.last_img_size != (cfg._tmp_img_w, cfg._tmp_img_h): prior_data = [] # Iteration order is important (it has to sync up with the convout) for j, i in product(range(conv_h), range(conv_w)): # +0.5 because priors are in center-size notation x = (i + 0.5) / conv_w y = (j + 0.5) / conv_h for ars in self.aspect_ratios: for scale in self.scales: for ar in ars: if not cfg.backbone.preapply_sqrt: ar = sqrt(ar) if cfg.backbone.use_pixel_scales: w = scale * ar / cfg.max_size h = scale / ar / cfg.max_size else: w = scale * ar / conv_w h = scale / ar / conv_h # This is for backward compatability with a bug where I made everything square by accident if cfg.backbone.use_square_anchors: h = w prior_data += [x, y, w, h] self.priors = torch.Tensor(prior_data, device=device).view(-1, 4).detach() self.priors.requires_grad = False self.last_img_size = (cfg._tmp_img_w, cfg._tmp_img_h) self.last_conv_size = (conv_w, conv_h) prior_cache[size] = None elif self.priors.device != device: # This whole weird situation is so that DataParalell doesn't copy the priors each iteration if prior_cache[size] is None: prior_cache[size] = {} if device not in prior_cache[size]: prior_cache[size][device] = self.priors.to(device) self.priors = prior_cache[size][device] return self.priors class FPN(ScriptModuleWrapper): """ Implements a general version of the FPN introduced in https://arxiv.org/pdf/1612.03144.pdf Parameters (in cfg.fpn): - num_features (int): The number of output features in the fpn layers. - interpolation_mode (str): The mode to pass to F.interpolate. - num_downsample (int): The number of downsampled layers to add onto the selected layers. These extra layers are downsampled from the last selected layer. Args: - in_channels (list): For each conv layer you supply in the forward pass, how many features will it have? 
""" __constants__ = ['interpolation_mode', 'num_downsample', 'use_conv_downsample', 'relu_pred_layers', 'lat_layers', 'pred_layers', 'downsample_layers', 'relu_downsample_layers'] def __init__(self, in_channels): super().__init__() self.lat_layers = nn.ModuleList([ nn.Conv2d(x, cfg.fpn.num_features, kernel_size=1) for x in reversed(in_channels) ]) # This is here for backwards compatability padding = 1 if cfg.fpn.pad else 0 self.pred_layers = nn.ModuleList([ nn.Conv2d(cfg.fpn.num_features, cfg.fpn.num_features, kernel_size=3, padding=padding) for _ in in_channels ]) if cfg.fpn.use_conv_downsample: self.downsample_layers = nn.ModuleList([ nn.Conv2d(cfg.fpn.num_features, cfg.fpn.num_features, kernel_size=3, padding=1, stride=2) for _ in range(cfg.fpn.num_downsample) ]) self.interpolation_mode = cfg.fpn.interpolation_mode self.num_downsample = cfg.fpn.num_downsample self.use_conv_downsample = cfg.fpn.use_conv_downsample self.relu_downsample_layers = cfg.fpn.relu_downsample_layers self.relu_pred_layers = cfg.fpn.relu_pred_layers @script_method_wrapper def forward(self, convouts:List[torch.Tensor]): """ Args: - convouts (list): A list of convouts for the corresponding layers in in_channels. Returns: - A list of FPN convouts in the same order as x with extra downsample layers if requested. """ out = [] x = torch.zeros(1, device=convouts[0].device) for i in range(len(convouts)): out.append(x) # For backward compatability, the conv layers are stored in reverse but the input and output is # given in the correct order. Thus, use j=-i-1 for the input and output and i for the conv layers. j = len(convouts) for lat_layer in self.lat_layers: j -= 1 if j < len(convouts) - 1: _, _, h, w = convouts[j].size() x = F.interpolate(x, size=(h, w), mode=self.interpolation_mode, align_corners=False) x = x + lat_layer(convouts[j]) out[j] = x # This janky second loop is here because TorchScript. j = len(convouts) for pred_layer in self.pred_layers: j -= 1 out[j] = pred_layer(out[j]) if self.relu_pred_layers: F.relu(out[j], inplace=True) cur_idx = len(out) # In the original paper, this takes care of P6 if self.use_conv_downsample: for downsample_layer in self.downsample_layers: out.append(downsample_layer(out[-1])) else: for idx in range(self.num_downsample): # Note: this is an untested alternative to out.append(out[-1][:, :, ::2, ::2]). Thanks TorchScript. out.append(nn.functional.max_pool2d(out[-1], 1, stride=2)) if self.relu_downsample_layers: for idx in range(len(out) - cur_idx): out[idx] = F.relu(out[idx + cur_idx], inplace=False) return out class FastMaskIoUNet(ScriptModuleWrapper): def __init__(self): super().__init__() input_channels = 1 last_layer = [(cfg.num_classes-1, 1, {})] self.maskiou_net, _ = make_net(input_channels, cfg.maskiou_net + last_layer, include_last_relu=True) def forward(self, x): x = self.maskiou_net(x) maskiou_p = F.max_pool2d(x, kernel_size=x.size()[2:]).squeeze(-1).squeeze(-1) return maskiou_p class Yolact(nn.Module): """ ██╗ ██╗ ██████╗ ██╗ █████╗ ██████╗████████╗ ╚██╗ ██╔╝██╔═══██╗██║ ██╔══██╗██╔════╝╚══██╔══╝ ╚████╔╝ ██║ ██║██║ ███████║██║ ██║ ╚██╔╝ ██║ ██║██║ ██╔══██║██║ ██║ ██║ ╚██████╔╝███████╗██║ ██║╚██████╗ ██║ ╚═╝ ╚═════╝ ╚══════╝╚═╝ ╚═╝ ╚═════╝ ╚═╝ You can set the arguments by changing them in the backbone config object in config.py. Parameters (in cfg.backbone): - selected_layers: The indices of the conv layers to use for prediction. 
- pred_scales: A list with len(selected_layers) containing tuples of scales (see PredictionModule) - pred_aspect_ratios: A list of lists of aspect ratios with len(selected_layers) (see PredictionModule) """ def __init__(self, only_last_layer=False): super().__init__() self.only_last_layer = only_last_layer self.backbone = construct_backbone(cfg.backbone) if cfg.freeze_bn: self.freeze_bn() # Compute mask_dim here and add it back to the config. Make sure Yolact's constructor is called early! if cfg.mask_type == mask_type.direct: cfg.mask_dim = cfg.mask_size**2 elif cfg.mask_type == mask
in list(state_dict.keys()): if key.startswith('backbone.layer') and not key.startswith('backbone.layers'): del state_dict[key] # Also for backward compatibility with v1.0 weights, do this check if key.startswith('fpn.downsample_layers.'): if cfg.fpn is not None and int(key.split('.')[2]) >= cfg.fpn.num_downsample: del state_dict[key] # Uncomment this in normal conditions # self.load_state_dict(state_dict) # Added this for fine-tuning. Comment this in normal conditions. try: self.load_state_dict(state_dict) except RuntimeError as e: print('Ignoring "' + str(e) + '"') def init_weights(self, backbone_path): """ Initialize weights for training. """ # Initialize the backbone with the pretrained weights. self.backbone.init_backbone(backbone_path) conv_constants = getattr(nn.Conv2d(1, 1, 1), '__constants__') # Quick lambda to test if one list contains the other def all_in(x, y): for _x in x: if _x not in y: return False return True # Initialize the rest of the conv layers with xavier for name, module in self.named_modules(): # See issue #127 for why we need such a complicated condition if the module is a WeakScriptModuleProxy # Broke in 1.3 (see issue #175), WeakScriptModuleProxy was turned into just ScriptModule. # Broke in 1.4 (see issue #292), where RecursiveScriptModule is the new star of the show. # Note that this might break with future pytorch updates, so let me know if it does is_script_conv = False if 'Script' in type(module).__name__: # 1.4 workaround: now there's an original_name member so just use that if hasattr(module, 'original_name'): is_script_conv = 'Conv' in module.original_name # 1.3 workaround: check if this has the same constants as a conv module else: is_script_conv = ( all_in(module.__dict__['_constants_set'], conv_constants) and all_in(conv_constants, module.__dict__['_constants_set'])) is_conv_layer = isinstance(module, nn.Conv2d) or is_script_conv if is_conv_layer and module not in self.backbone.backbone_modules: nn.init.xavier_uniform_(module.weight.data) if module.bias is not None: if cfg.use_focal_loss and 'conf_layer' in name: if not cfg.use_sigmoid_focal_loss: # Initialize the last layer as in the focal loss paper. # Because we use softmax and not sigmoid, I had to derive an alternate expression # on a notecard. Define pi to be the probability of outputting a foreground detection. # Then let z = sum(exp(x)) - exp(x_0). Finally let c be the number of foreground classes. # Chugging through the math, this gives us # x_0 = log(z * (1 - pi) / pi) where 0 is the background class # x_i = log(z / c) for all i > 0 # For simplicity (and because we have a degree of freedom here), set z = 1. 
Then we have # x_0 = log((1 - pi) / pi) note: don't split up the log for numerical stability # x_i = -log(c) for all i > 0 module.bias.data[0] = np.log((1 - cfg.focal_loss_init_pi) / cfg.focal_loss_init_pi) module.bias.data[1:] = -np.log(module.bias.size(0) - 1) else: module.bias.data[0] = -np.log(cfg.focal_loss_init_pi / (1 - cfg.focal_loss_init_pi)) module.bias.data[1:] = -np.log((1 - cfg.focal_loss_init_pi) / cfg.focal_loss_init_pi) else: module.bias.data.zero_() def train(self, mode=True): super().train(mode) if cfg.freeze_bn: self.freeze_bn() def freeze_bn(self, enable=False): """ Adapted from https://discuss.pytorch.org/t/how-to-train-with-frozen-batchnorm/12106/8 """ for module in self.modules(): if isinstance(module, nn.BatchNorm2d): module.train() if enable else module.eval() module.weight.requires_grad = enable module.bias.requires_grad = enable def forward(self, x): """ The input should be of size [batch_size, 3, img_h, img_w] """ _, _, img_h, img_w = x.size() cfg._tmp_img_h = img_h cfg._tmp_img_w = img_w with timer.env('backbone'): outs = self.backbone(x) if cfg.fpn is not None: with timer.env('fpn'): # Use backbone.selected_layers because we overwrote self.selected_layers outs = [outs[i] for i in cfg.backbone.selected_layers] outs = self.fpn(outs) proto_out = None if cfg.mask_type == mask_type.lincomb and cfg.eval_mask_branch: with timer.env('proto'): proto_x = x if self.proto_src is None else outs[self.proto_src] if self.num_grids > 0: grids = self.grid.repeat(proto_x.size(0), 1, 1, 1) proto_x = torch.cat([proto_x, grids], dim=1) proto_out = self.proto_net(proto_x) proto_out = cfg.mask_proto_prototype_activation(proto_out) if cfg.mask_proto_prototypes_as_features: # Clone here because we don't want to permute this, though idk if contiguous makes this unnecessary proto_downsampled = proto_out.clone() if cfg.mask_proto_prototypes_as_features_no_grad: proto_downsampled = proto_out.detach() # Move the features last so the multiplication is easy proto_out = proto_out.permute(0, 2, 3, 1).contiguous() if cfg.mask_proto_bias: bias_shape = [x for x in proto_out.size()] bias_shape[-1] = 1 proto_out = torch.cat([proto_out, torch.ones(*bias_shape)], -1) with timer.env('pred_heads'): pred_outs = { 'loc': [], 'conf': [], 'mask': [], 'priors': [] } if cfg.use_mask_scoring: pred_outs['score'] = [] if cfg.use_instance_coeff: pred_outs['inst'] = [] for idx, pred_layer in zip(self.selected_layers, self.prediction_layers): pred_x = outs[idx] if cfg.mask_type == mask_type.lincomb and cfg.mask_proto_prototypes_as_features: # Scale the prototypes down to the current prediction layer's size and add it as inputs proto_downsampled = F.interpolate(proto_downsampled, size=outs[idx].size()[2:], mode='bilinear', align_corners=False) pred_x = torch.cat([pred_x, proto_downsampled], dim=1) # A hack for the way dataparallel works if cfg.share_prediction_module and pred_layer is not self.prediction_layers[0]: pred_layer.parent = [self.prediction_layers[0]] if self.only_last_layer: p = pred_layer(pred_x.detach()) else: p = pred_layer(pred_x) for k, v in p.items(): pred_outs[k].append(v) for k, v in pred_outs.items(): pred_outs[k] = torch.cat(v, -2) if proto_out is not None: pred_outs['proto'] = proto_out if self.training: # For the extra loss functions if cfg.use_class_existence_loss: pred_outs['classes'] = self.class_existence_fc(outs[-1].mean(dim=(2, 3))) if cfg.use_semantic_segmentation_loss: pred_outs['segm'] = self.semantic_seg_conv(outs[0]) return pred_outs else: if cfg.use_mask_scoring: 
pred_outs['score'] = torch.sigmoid(pred_outs['score']) if cfg.use_focal_loss: if cfg.use_sigmoid_focal_loss: # Note: even though conf[0] exists, this mode doesn't train it so don't use it pred_outs['conf'] = torch.sigmoid(pred_outs['conf']) if cfg.use_mask_scoring: pred_outs['conf'] *= pred_outs['score'] elif cfg.use_objectness_score: # See focal_loss_sigmoid in multibox_loss.py for details objectness = torch.sigmoid(pred_outs['conf'][:, :, 0]) pred_outs['conf'][:, :, 1:] = objectness[:, :, None] * F.softmax(pred_outs['conf'][:, :, 1:], -1) pred_outs['conf'][:, :, 0 ] = 1 - objectness else: pred_outs['conf'] = F.softmax(pred_outs['conf'], -1) else: if cfg.use_objectness_score: objectness = torch.sigmoid(pred_outs['conf'][:, :, 0]) pred_outs['conf'][:, :, 1:] = (objectness > 0.10)[..., None] \ * F.softmax(pred_outs['conf'][:, :, 1:], dim=-1) else: pred_outs['conf'] = F.softmax(pred_outs['conf'], -1) return self.detect(pred_outs, self) # Some testing code if __name__ == '__main__': from utils.functions import init_console init_console() # Use the first argument to set the config if you want import sys if len(sys.argv) > 1: from data.config import set_cfg set_cfg(sys.argv[1]) net = Yolact() net.train() net.init_weights(backbone_path='weights/' + cfg.backbone.path) # GPU net = net.cuda() torch.set_default_tensor_type('torch.cuda.FloatTensor') x = torch.zeros((1, 3, cfg.max_size, cfg.max_size)) y = net(x) for p in net.prediction_layers: print(p.last_conv_size) print() for k, a in y.items(): print(k + ': ', a.size(), torch.sum(a)) exit() net(x) # timer.disable('pass2') avg = MovingAverage() try: while True: timer.reset() with timer.env('everything else'): net(x) avg.add(timer.total_time()) print('\033[2J') # Moves console cursor to 0,0 timer.print_stats() print('Avg fps: %.2f\tAvg ms: %.2f ' % (1/avg.get_avg(), avg.get_avg()*1000)) except KeyboardInterrupt: pass
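As a concrete check of the bias initialisation derived in the comment inside init_weights above (taking z = 1), the background logit starts at log((1 - pi)/pi) and every foreground logit at -log(c). The sketch below plugs in an assumed pi of 0.01 and an assumed 80 foreground classes; both numbers are illustrative, not values read from this configuration.

import numpy as np

# Hypothetical values for illustration; in the real code pi comes from
# cfg.focal_loss_init_pi and the class count from the dataset configuration.
pi = 0.01   # assumed prior probability of a foreground detection
c = 80      # assumed number of foreground classes

bias_background = np.log((1 - pi) / pi)   # x_0 = log((1 - pi) / pi)
bias_foreground = -np.log(c)              # x_i = -log(c) for all i > 0

print(round(float(bias_background), 3), round(float(bias_foreground), 3))  # ~4.595, ~-4.382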
_type.lincomb: if cfg.mask_proto_use_grid: self.grid = torch.Tensor(np.load(cfg.mask_proto_grid_file)) self.num_grids = self.grid.size(0) else: self.num_grids = 0 self.proto_src = cfg.mask_proto_src if self.proto_src is None: in_channels = 3 elif cfg.fpn is not None: in_channels = cfg.fpn.num_features else: in_channels = self.backbone.channels[self.proto_src] in_channels += self.num_grids # The include_last_relu=false here is because we might want to change it to another function self.proto_net, cfg.mask_dim = make_net(in_channels, cfg.mask_proto_net, include_last_relu=False) if cfg.mask_proto_bias: cfg.mask_dim += 1 self.selected_layers = cfg.backbone.selected_layers src_channels = self.backbone.channels if cfg.use_maskiou: self.maskiou_net = FastMaskIoUNet() if cfg.fpn is not None: # Some hacky rewiring to accomodate the FPN self.fpn = FPN([src_channels[i] for i in self.selected_layers]) self.selected_layers = list(range(len(self.selected_layers) + cfg.fpn.num_downsample)) src_channels = [cfg.fpn.num_features] * len(self.selected_layers) self.prediction_layers = nn.ModuleList() cfg.num_heads = len(self.selected_layers) for idx, layer_idx in enumerate(self.selected_layers): # If we're sharing prediction module weights, have every module's parent be the first one parent = None if cfg.share_prediction_module and idx > 0: parent = self.prediction_layers[0] pred = PredictionModule(src_channels[layer_idx], src_channels[layer_idx], aspect_ratios = cfg.backbone.pred_aspect_ratios[idx], scales = cfg.backbone.pred_scales[idx], parent = parent, index = idx) self.prediction_layers.append(pred) # Extra parameters for the extra losses if cfg.use_class_existence_loss: # This comes from the smallest layer selected # Also note that cfg.num_classes includes background self.class_existence_fc = nn.Linear(src_channels[-1], cfg.num_classes - 1) if cfg.use_semantic_segmentation_loss: self.semantic_seg_conv = nn.Conv2d(src_channels[0], cfg.num_classes-1, kernel_size=1) # For use in evaluation self.detect = Detect(cfg.num_classes, bkg_label=0, top_k=cfg.nms_top_k, conf_thresh=cfg.nms_conf_thresh, nms_thresh=cfg.nms_thresh) def save_weights(self, path): """ Saves the model's weights using compression because the file sizes were getting too big. """ torch.save(self.state_dict(), path) def load_weights(self, path): """ Loads weights from a compressed save file. """ state_dict = torch.load(path) # For backward compatability, remove these (the new variable is called layers) for key
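make_priors above packs anchors as flat [x, y, w, h] values in center-size notation. The snippet below restates that arithmetic for a single grid cell and one aspect-ratio/scale pair so the normalisation is easier to follow; the conv size, scale and max_size figures are assumptions chosen for illustration, not values taken from cfg.

from math import sqrt

# Illustrative numbers only; the real values come from cfg and the conv output size.
conv_w, conv_h = 30, 30    # assumed convout resolution
max_size = 600             # assumed cfg.max_size (pixel-scale branch)
scale, ar = 24, 2.0        # assumed priorbox scale (pixels) and aspect ratio
i, j = 0, 0                # top-left cell of the feature map

ar = sqrt(ar)              # mirrors the not-preapply_sqrt branch in make_priors
x = (i + 0.5) / conv_w     # prior center x, normalised to [0, 1]
y = (j + 0.5) / conv_h     # prior center y
w = scale * ar / max_size  # width relative to the input image (use_pixel_scales)
h = scale / ar / max_size  # height relative to the input image

print([x, y, w, h])        # one prior in [x, y, width, height] center-size form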
scan_error.go
// Code generated by go-swagger; DO NOT EDIT.

package models

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command

import (
	"github.com/go-openapi/errors"
	strfmt "github.com/go-openapi/strfmt"
	"github.com/go-openapi/swag"
	"github.com/go-openapi/validate"
)

// ScanError error reported by SCA during the scan
// swagger:model scan error
type ScanError struct {

	// error code
	// Required: true
	// Read Only: true
	ErrorCode string `json:"errorCode"`

	// error description
	// Required: true
	// Read Only: true
	ErrorDescription string `json:"errorDescription"`

	// id
	// Required: true
	// Read Only: true
	ID int64 `json:"id"`

	// id of scan associated with the error
	// Required: true
	// Read Only: true
	ScanID int64 `json:"scanId"`
}
// Validate validates this scan error
func (m *ScanError) Validate(formats strfmt.Registry) error {
	var res []error

	if err := m.validateErrorCode(formats); err != nil {
		res = append(res, err)
	}

	if err := m.validateErrorDescription(formats); err != nil {
		res = append(res, err)
	}

	if err := m.validateID(formats); err != nil {
		res = append(res, err)
	}

	if err := m.validateScanID(formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

func (m *ScanError) validateErrorCode(formats strfmt.Registry) error {

	if err := validate.RequiredString("errorCode", "body", string(m.ErrorCode)); err != nil {
		return err
	}

	return nil
}

func (m *ScanError) validateErrorDescription(formats strfmt.Registry) error {

	if err := validate.RequiredString("errorDescription", "body", string(m.ErrorDescription)); err != nil {
		return err
	}

	return nil
}

func (m *ScanError) validateID(formats strfmt.Registry) error {

	if err := validate.Required("id", "body", int64(m.ID)); err != nil {
		return err
	}

	return nil
}

func (m *ScanError) validateScanID(formats strfmt.Registry) error {

	if err := validate.Required("scanId", "body", int64(m.ScanID)); err != nil {
		return err
	}

	return nil
}

// MarshalBinary interface implementation
func (m *ScanError) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation
func (m *ScanError) UnmarshalBinary(b []byte) error {
	var res ScanError
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
ddd1.go
// errorcheck

// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Verify that illegal uses of ... are detected.
// Does not compile.

package main

import "unsafe"

func sum(args ...int) int { return 0 }

var (
	_ = sum(1, 2, 3)
	_ = sum()
	_ = sum(1.0, 2.0)
	_ = sum(1.5)      // ERROR "1\.5 .untyped float constant. as int|integer"
	_ = sum("hello")  // ERROR ".hello. (.untyped string constant. as int|.type untyped string. as type int)|incompatible"
	_ = sum([]int{1}) // ERROR "\[\]int{...}.*as type int|incompatible"
)

func sum3(int, int, int) int { return 0 }
func tuple() (int, int, int) { return 1, 2, 3 }

var (
	_ = sum(tuple())
	_ = sum(tuple()...)  // ERROR "\.{3} with 3-valued|multiple-value"
	_ = sum3(tuple())
	_ = sum3(tuple()...) // ERROR "\.{3} in call to non-variadic|multiple-value|invalid use of .*[.][.][.]"
)

type T []T

func funny(args ...T) int { return 0 }

var (
	_ = funny(nil)
	_ = funny(nil, nil)
	_ = funny([]T{}) // ok because []T{} is a T; passes []T{[]T{}}
)

func
(n int) {}

func bad(args ...int) {
	print(1, 2, args...)   // ERROR "[.][.][.]"
	println(args...)       // ERROR "[.][.][.]"
	ch := make(chan int)
	close(ch...)           // ERROR "[.][.][.]"
	_ = len(args...)       // ERROR "[.][.][.]"
	_ = new(int...)        // ERROR "[.][.][.]"
	n := 10
	_ = make([]byte, n...)    // ERROR "[.][.][.]"
	_ = make([]byte, 10 ...)  // ERROR "[.][.][.]"
	var x int
	_ = unsafe.Pointer(&x...) // ERROR "[.][.][.]"
	_ = unsafe.Sizeof(x...)   // ERROR "[.][.][.]"
	_ = [...]byte("foo")      // ERROR "[.][.][.]"
	_ = [...][...]int{{1,2,3},{4,5,6}} // ERROR "[.][.][.]"

	Foo(x...) // ERROR "\.{3} in call to non-variadic|invalid use of .*[.][.][.]"
}
Foo