Dataset schema (one record per source file):
hexsha: string (length 40)
size: int64 (4 to 1.05M)
content: string (length 4 to 1.05M)
avg_line_length: float64 (1.33 to 100)
max_line_length: int64 (1 to 1k)
alphanum_fraction: float64 (0.25 to 1)
hexsha: 28e47aa6f3c77831f5f80d1e6d8d45ce4e04870e | size: 2,564
use crate::util::is_valid_ident;
use swc_ecma_ast::*;
use swc_ecma_visit::{Fold, FoldWith};

/// babel: `transform-member-expression-literals`
///
/// # Input
/// ```js
/// obj["foo"] = "isValid";
///
/// obj.const = "isKeyword";
/// obj["var"] = "isKeyword";
/// ```
///
/// # Output
/// ```js
/// obj.foo = "isValid";
///
/// obj["const"] = "isKeyword";
/// obj["var"] = "isKeyword";
/// ```
#[derive(Default, Clone, Copy)]
pub struct MemberExprLit;

noop_fold_type!(MemberExprLit);

impl Fold for MemberExprLit {
    fn fold_member_expr(&mut self, e: MemberExpr) -> MemberExpr {
        let mut e = validate!(e.fold_children_with(self));

        macro_rules! handle {
            ($sym:expr, $span:expr) => {
                if $sym.is_reserved_for_es3() || !is_valid_ident(&$sym) {
                    return MemberExpr {
                        computed: true,
                        prop: Box::new(Expr::Lit(Lit::Str(Str {
                            span: $span,
                            value: $sym,
                            has_escape: false,
                        }))),
                        ..e
                    };
                } else {
                    return MemberExpr {
                        computed: false,
                        prop: Box::new(Expr::Ident(quote_ident!($span, $sym))),
                        ..e
                    };
                }
            };
        }

        e.prop = match *e.prop {
            Expr::Ident(i) => {
                if e.computed {
                    Box::new(Expr::Ident(i))
                } else {
                    handle!(i.sym, i.span)
                }
            }
            _ => e.prop,
        };

        e
    }

    fn fold_module(&mut self, node: Module) -> Module {
        validate!(node.fold_children_with(self))
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    test!(
        ::swc_ecma_parser::Syntax::default(),
        |_| MemberExprLit,
        basic,
        r#"obj["foo"] = "isValid";

obj.const = "isKeyword";
obj["var"] = "isKeyword";"#,
        r#"obj["foo"] = "isValid";

obj["const"] = "isKeyword";
obj["var"] = "isKeyword";"#
    );

    test!(
        ::swc_ecma_parser::Syntax::default(),
        |_| MemberExprLit,
        issue_206,
        "const number = foo[bar1][baz1]",
        "const number = foo[bar1][baz1]"
    );

    test!(
        ::swc_ecma_parser::Syntax::default(),
        |_| MemberExprLit,
        issue_211,
        "_query[idx]=$this.attr('data-ref');",
        "_query[idx]=$this.attr('data-ref');"
    );
}
avg_line_length: 24.188679 | max_line_length: 79 | alphanum_fraction: 0.443838
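The transform above turns on a single predicate: a property name may use dot notation only if it is a valid identifier and not reserved in ES3 (the file's `is_reserved_for_es3` and `is_valid_ident` checks). A minimal standalone sketch of that rule, with a hand-rolled identifier check and an abbreviated reserved-word list standing in for the swc helpers (everything here is illustrative, not the crate's API):

/// Illustrative stand-in for the swc helpers used above: decide whether a
/// property name may use dot notation (`obj.foo`) or must stay computed
/// (`obj["const"]`). The reserved-word list is abbreviated for the sketch.
fn needs_computed_access(name: &str) -> bool {
    const ES3_RESERVED: &[&str] = &["const", "class", "enum", "export", "import"];
    let mut chars = name.chars();
    let valid_ident = match chars.next() {
        Some(c) if c.is_ascii_alphabetic() || c == '_' || c == '$' => {
            chars.all(|c| c.is_ascii_alphanumeric() || c == '_' || c == '$')
        }
        _ => false,
    };
    ES3_RESERVED.contains(&name) || !valid_ident
}

fn main() {
    assert!(!needs_computed_access("foo"));   // obj["foo"]  -> obj.foo
    assert!(needs_computed_access("const"));  // obj.const   -> obj["const"]
    assert!(needs_computed_access("my-key")); // not an identifier, stays computed
}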
hexsha: c1aac6bec6635665aafa9490724e5b39874a8875 | size: 1,261
extern crate lttng_ust_generate;

use std::env;
use std::path::PathBuf;

use lttng_ust_generate::{Provider, CTFType, CIntegerType, Generator, LogLevel};

fn main() {
    let mut provider = Provider::new("rust_logging");
    {
        let log_entry_class = provider.create_class("log_entry")
            .add_field("file", CTFType::SequenceText)
            .add_field("line", CTFType::Integer(CIntegerType::U32))
            .add_field("module_path", CTFType::SequenceText)
            .add_field("target", CTFType::SequenceText)
            .add_field("message", CTFType::SequenceText);
        log_entry_class.instantiate_with_level("trace", LogLevel::Debug);
        log_entry_class.instantiate_with_level("debug", LogLevel::DebugLine);
        log_entry_class.instantiate_with_level("info", LogLevel::Info);
        log_entry_class.instantiate_with_level("warn", LogLevel::Warning);
        log_entry_class.instantiate_with_level("error", LogLevel::Error);
    }

    Generator::default()
        .generated_lib_name("rust_lttng_logging")
        .register_provider(provider)
        .output_file_name(PathBuf::from(env::var("OUT_DIR").unwrap()).join("logging_tracepoints.rs"))
        .generate()
        .expect("Unable to generate tracepoint bindings");
}
avg_line_length: 39.40625 | max_line_length: 101 | alphanum_fraction: 0.686757
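A build script like this only generates the bindings; the main crate still has to pull them in from OUT_DIR. A sketch of the consuming side, assuming the companion lttng-ust crate and its documented import_tracepoints! pattern (the macro name, module layout, and tracepoint signature below are all assumptions, not taken from this record):

// Sketch (src/main.rs): importing the bindings the build script wrote.
#[macro_use]
extern crate lttng_ust;

import_tracepoints!(
    concat!(env!("OUT_DIR"), "/logging_tracepoints.rs"),
    tracepoints
);

fn main() {
    // Assumed shape: one function per instantiated event ("trace".."error"),
    // taking the declared fields (file, line, module_path, target, message) in order.
    tracepoints::rust_logging::info("src/main.rs", 42, "my_app", "my_app", "hello from LTTng");
}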
hexsha: d7160bb5efd447e6324a263c892f8da38b949bc8 | size: 14,502
// This file is generated by rust-protobuf 2.8.1. Do not edit // @generated // https://github.com/Manishearth/rust-clippy/issues/702 #![allow(unknown_lints)] #![allow(clippy::all)] #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(box_pointers)] #![allow(dead_code)] #![allow(missing_docs)] #![allow(non_camel_case_types)] #![allow(non_snake_case)] #![allow(non_upper_case_globals)] #![allow(trivial_casts)] #![allow(unsafe_code)] #![allow(unused_imports)] #![allow(unused_results)] //! Generated file from `github.com/tendermint/tendermint/libs/common/types.proto` use protobuf::Message as Message_imported_for_functions; use protobuf::ProtobufEnum as ProtobufEnum_imported_for_functions; /// Generated files are compatible only with the same version /// of protobuf runtime. const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_8_1; #[derive(PartialEq,Clone,Default)] pub struct KVPair { // message fields pub key: ::std::vec::Vec<u8>, pub value: ::std::vec::Vec<u8>, // special fields pub unknown_fields: ::protobuf::UnknownFields, pub cached_size: ::protobuf::CachedSize, } impl<'a> ::std::default::Default for &'a KVPair { fn default() -> &'a KVPair { <KVPair as ::protobuf::Message>::default_instance() } } impl KVPair { pub fn new() -> KVPair { ::std::default::Default::default() } // bytes key = 1; pub fn get_key(&self) -> &[u8] { &self.key } pub fn clear_key(&mut self) { self.key.clear(); } // Param is passed by value, moved pub fn set_key(&mut self, v: ::std::vec::Vec<u8>) { self.key = v; } // Mutable pointer to the field. // If field is not initialized, it is initialized with default value first. pub fn mut_key(&mut self) -> &mut ::std::vec::Vec<u8> { &mut self.key } // Take field pub fn take_key(&mut self) -> ::std::vec::Vec<u8> { ::std::mem::replace(&mut self.key, ::std::vec::Vec::new()) } // bytes value = 2; pub fn get_value(&self) -> &[u8] { &self.value } pub fn clear_value(&mut self) { self.value.clear(); } // Param is passed by value, moved pub fn set_value(&mut self, v: ::std::vec::Vec<u8>) { self.value = v; } // Mutable pointer to the field. // If field is not initialized, it is initialized with default value first. pub fn mut_value(&mut self) -> &mut ::std::vec::Vec<u8> { &mut self.value } // Take field pub fn take_value(&mut self) -> ::std::vec::Vec<u8> { ::std::mem::replace(&mut self.value, ::std::vec::Vec::new()) } } impl ::protobuf::Message for KVPair { fn is_initialized(&self) -> bool { true } fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> { while !is.eof()? 
{ let (field_number, wire_type) = is.read_tag_unpack()?; match field_number { 1 => { ::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.key)?; }, 2 => { ::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.value)?; }, _ => { ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; }, }; } ::std::result::Result::Ok(()) } // Compute sizes of nested messages #[allow(unused_variables)] fn compute_size(&self) -> u32 { let mut my_size = 0; if !self.key.is_empty() { my_size += ::protobuf::rt::bytes_size(1, &self.key); } if !self.value.is_empty() { my_size += ::protobuf::rt::bytes_size(2, &self.value); } my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); self.cached_size.set(my_size); my_size } fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> { if !self.key.is_empty() { os.write_bytes(1, &self.key)?; } if !self.value.is_empty() { os.write_bytes(2, &self.value)?; } os.write_unknown_fields(self.get_unknown_fields())?; ::std::result::Result::Ok(()) } fn get_cached_size(&self) -> u32 { self.cached_size.get() } fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { &self.unknown_fields } fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { &mut self.unknown_fields } fn as_any(&self) -> &dyn (::std::any::Any) { self as &dyn (::std::any::Any) } fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) { self as &mut dyn (::std::any::Any) } fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> { self } fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { Self::descriptor_static() } fn new() -> KVPair { KVPair::new() } fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor { static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const ::protobuf::reflect::MessageDescriptor, }; unsafe { descriptor.get(|| { let mut fields = ::std::vec::Vec::new(); fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBytes>( "key", |m: &KVPair| { &m.key }, |m: &mut KVPair| { &mut m.key }, )); fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBytes>( "value", |m: &KVPair| { &m.value }, |m: &mut KVPair| { &mut m.value }, )); ::protobuf::reflect::MessageDescriptor::new::<KVPair>( "KVPair", fields, file_descriptor_proto() ) }) } } fn default_instance() -> &'static KVPair { static mut instance: ::protobuf::lazy::Lazy<KVPair> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const KVPair, }; unsafe { instance.get(KVPair::new) } } } impl ::protobuf::Clear for KVPair { fn clear(&mut self) { self.key.clear(); self.value.clear(); self.unknown_fields.clear(); } } impl ::std::fmt::Debug for KVPair { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { ::protobuf::text_format::fmt(self, f) } } impl ::protobuf::reflect::ProtobufValue for KVPair { fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef { ::protobuf::reflect::ProtobufValueRef::Message(self) } } #[derive(PartialEq,Clone,Default)] pub struct KI64Pair { // message fields pub key: ::std::vec::Vec<u8>, pub value: i64, // special fields pub unknown_fields: ::protobuf::UnknownFields, pub cached_size: ::protobuf::CachedSize, } impl<'a> ::std::default::Default for &'a KI64Pair { fn 
default() -> &'a KI64Pair { <KI64Pair as ::protobuf::Message>::default_instance() } } impl KI64Pair { pub fn new() -> KI64Pair { ::std::default::Default::default() } // bytes key = 1; pub fn get_key(&self) -> &[u8] { &self.key } pub fn clear_key(&mut self) { self.key.clear(); } // Param is passed by value, moved pub fn set_key(&mut self, v: ::std::vec::Vec<u8>) { self.key = v; } // Mutable pointer to the field. // If field is not initialized, it is initialized with default value first. pub fn mut_key(&mut self) -> &mut ::std::vec::Vec<u8> { &mut self.key } // Take field pub fn take_key(&mut self) -> ::std::vec::Vec<u8> { ::std::mem::replace(&mut self.key, ::std::vec::Vec::new()) } // int64 value = 2; pub fn get_value(&self) -> i64 { self.value } pub fn clear_value(&mut self) { self.value = 0; } // Param is passed by value, moved pub fn set_value(&mut self, v: i64) { self.value = v; } } impl ::protobuf::Message for KI64Pair { fn is_initialized(&self) -> bool { true } fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> { while !is.eof()? { let (field_number, wire_type) = is.read_tag_unpack()?; match field_number { 1 => { ::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.key)?; }, 2 => { if wire_type != ::protobuf::wire_format::WireTypeVarint { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } let tmp = is.read_int64()?; self.value = tmp; }, _ => { ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; }, }; } ::std::result::Result::Ok(()) } // Compute sizes of nested messages #[allow(unused_variables)] fn compute_size(&self) -> u32 { let mut my_size = 0; if !self.key.is_empty() { my_size += ::protobuf::rt::bytes_size(1, &self.key); } if self.value != 0 { my_size += ::protobuf::rt::value_size(2, self.value, ::protobuf::wire_format::WireTypeVarint); } my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); self.cached_size.set(my_size); my_size } fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> { if !self.key.is_empty() { os.write_bytes(1, &self.key)?; } if self.value != 0 { os.write_int64(2, self.value)?; } os.write_unknown_fields(self.get_unknown_fields())?; ::std::result::Result::Ok(()) } fn get_cached_size(&self) -> u32 { self.cached_size.get() } fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { &self.unknown_fields } fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { &mut self.unknown_fields } fn as_any(&self) -> &dyn (::std::any::Any) { self as &dyn (::std::any::Any) } fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) { self as &mut dyn (::std::any::Any) } fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> { self } fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { Self::descriptor_static() } fn new() -> KI64Pair { KI64Pair::new() } fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor { static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const ::protobuf::reflect::MessageDescriptor, }; unsafe { descriptor.get(|| { let mut fields = ::std::vec::Vec::new(); fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBytes>( "key", |m: &KI64Pair| { &m.key }, |m: &mut KI64Pair| { &mut m.key }, )); 
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt64>( "value", |m: &KI64Pair| { &m.value }, |m: &mut KI64Pair| { &mut m.value }, )); ::protobuf::reflect::MessageDescriptor::new::<KI64Pair>( "KI64Pair", fields, file_descriptor_proto() ) }) } } fn default_instance() -> &'static KI64Pair { static mut instance: ::protobuf::lazy::Lazy<KI64Pair> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const KI64Pair, }; unsafe { instance.get(KI64Pair::new) } } } impl ::protobuf::Clear for KI64Pair { fn clear(&mut self) { self.key.clear(); self.value = 0; self.unknown_fields.clear(); } } impl ::std::fmt::Debug for KI64Pair { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { ::protobuf::text_format::fmt(self, f) } } impl ::protobuf::reflect::ProtobufValue for KI64Pair { fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef { ::protobuf::reflect::ProtobufValueRef::Message(self) } } static file_descriptor_proto_data: &'static [u8] = b"\ \n8github.com/tendermint/tendermint/libs/common/types.proto\x12\x06commo\ n\"*\n\x06KVPair\x12\r\n\x03key\x18\x01\x20\x01(\x0cB\0\x12\x0f\n\x05val\ ue\x18\x02\x20\x01(\x0cB\0:\0\",\n\x08KI64Pair\x12\r\n\x03key\x18\x01\ \x20\x01(\x0cB\0\x12\x0f\n\x05value\x18\x02\x20\x01(\x03B\0:\0B\x1c\xd0\ \xe2\x1e\x01\xb8\xe2\x1e\x01\xa8\xe2\x1e\x01\xe0\xe2\x1e\x01\xc0\xe3\x1e\ \x01\xc8\xe2\x1e\x01\xf8\xe1\x1e\x01b\x06proto3\ "; static mut file_descriptor_proto_lazy: ::protobuf::lazy::Lazy<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const ::protobuf::descriptor::FileDescriptorProto, }; fn parse_descriptor_proto() -> ::protobuf::descriptor::FileDescriptorProto { ::protobuf::parse_from_bytes(file_descriptor_proto_data).unwrap() } pub fn file_descriptor_proto() -> &'static ::protobuf::descriptor::FileDescriptorProto { unsafe { file_descriptor_proto_lazy.get(|| { parse_descriptor_proto() }) } }
avg_line_length: 30.921109 | max_line_length: 133 | alphanum_fraction: 0.555716
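A short sketch of how generated messages like the KVPair above are used with the rust-protobuf 2.x runtime (`write_to_bytes` and the top-level `parse_from_bytes` are part of the `protobuf` crate; the field values here are made up):

// Sketch: round-tripping a KVPair through the rust-protobuf 2.x runtime.
use protobuf::Message;

fn roundtrip() -> protobuf::ProtobufResult<()> {
    let mut pair = KVPair::new();
    pair.set_key(b"height".to_vec());
    pair.set_value(b"42".to_vec());

    let bytes = pair.write_to_bytes()?;                        // serialize
    let decoded: KVPair = protobuf::parse_from_bytes(&bytes)?; // deserialize
    assert_eq!(pair, decoded); // KVPair derives PartialEq
    Ok(())
}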
hexsha: 2f6cbd78e36aee5c9b7fdc02af632d257c95cfe9 | size: 1,124
use gimli;

use std::fmt::{Debug, Formatter, Result as FmtResult};
use std::ops::{Index, IndexMut};

#[derive(Default, Clone, PartialEq, Eq)]
pub struct Registers {
    registers: [Option<u64>; 17],
}

impl Debug for Registers {
    fn fmt(&self, fmt: &mut Formatter) -> FmtResult {
        for reg in &self.registers {
            match *reg {
                None => write!(fmt, " XXX")?,
                Some(x) => write!(fmt, " 0x{:x}", x)?,
            }
        }
        Ok(())
    }
}

impl Index<u16> for Registers {
    type Output = Option<u64>;

    fn index(&self, index: u16) -> &Option<u64> {
        &self.registers[index as usize]
    }
}

impl IndexMut<u16> for Registers {
    fn index_mut(&mut self, index: u16) -> &mut Option<u64> {
        &mut self.registers[index as usize]
    }
}

impl Index<gimli::Register> for Registers {
    type Output = Option<u64>;

    fn index(&self, reg: gimli::Register) -> &Option<u64> {
        &self[reg.0]
    }
}

impl IndexMut<gimli::Register> for Registers {
    fn index_mut(&mut self, reg: gimli::Register) -> &mut Option<u64> {
        &mut self[reg.0]
    }
}
avg_line_length: 22.938776 | max_line_length: 71 | alphanum_fraction: 0.564057
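A sketch of why the file implements Index twice: a register can be addressed either by its raw DWARF number (u16) or by a typed gimli::Register such as gimli::X86_64::RA (register 16, the return-address column on x86-64). The addresses are made up:

// Sketch: exercising both Index impls on Registers.
fn demo() {
    let mut regs = Registers::default();
    regs[7u16] = Some(0x7fff_ffff_e000);              // index by raw DWARF number
    regs[gimli::X86_64::RA] = Some(0x5555_5555_1234); // or by gimli::Register
    assert_eq!(regs[gimli::X86_64::RA], regs[16u16]); // same slot either way
    println!("{:?}", regs); // unset registers print as " XXX"
}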
hexsha: 646c8bc79cbc0c619d14fe2171b5fd7b8224bf41 | size: 2,030
// Copyright 2018-2021 Parity Technologies (UK) Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#![cfg_attr(not(feature = "std"), no_std)]

use ink_lang as ink;

#[ink::contract]
pub mod flipper {
    #[ink(storage)]
    pub struct Flipper {
        value: bool,
    }

    impl Flipper {
        /// Creates a new flipper smart contract initialized with the given value.
        #[ink(constructor)]
        pub fn new(init_value: bool) -> Self {
            Self { value: init_value }
        }

        /// Creates a new flipper smart contract initialized to `false`.
        #[ink(constructor)]
        pub fn default() -> Self {
            Self::new(Default::default())
        }

        /// Flips the current value of the Flipper's bool.
        #[ink(message)]
        pub fn flip(&mut self) {
            self.value = !self.value;
        }

        /// Returns the current value of the Flipper's bool.
        #[ink(message)]
        pub fn get(&self) -> bool {
            self.value
        }
    }

    #[cfg(test)]
    mod tests {
        use super::*;

        #[test]
        fn default_works() {
            let flipper = Flipper::default();
            assert_eq!(flipper.get(), false);
        }

        #[test]
        fn it_works() {
            let mut flipper = Flipper::new(false);
            assert_eq!(flipper.get(), false);
            flipper.flip();
            assert_eq!(flipper.get(), true);
        }
    }
}
avg_line_length: 28.591549 | max_line_length: 83 | alphanum_fraction: 0.557635
hexsha: 4843ac233ef1873099d28d53a6aea12158a8ba54 | size: 5,543
// Copyright 2020 IOTA Stiftung
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
// an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and limitations under the License.

use crate::{Btrit, RawEncoding, RawEncodingBuf, ShiftTernary, Utrit};

use std::ops::Range;

// Trits Per Byte
const TPB: usize = 5;
// Number required to push a byte between balanced and unbalanced representations
const BAL: i8 = 121;

/// An encoding scheme slice that uses a single byte to represent five trits.
#[repr(transparent)]
pub struct T5B1([()]);

impl T5B1 {
    unsafe fn make(ptr: *const i8, offset: usize, len: usize) -> *const Self {
        let len = (len << 3) | (offset % TPB);
        std::mem::transmute((ptr.add(offset / TPB), len))
    }

    unsafe fn ptr(&self, index: usize) -> *const i8 {
        let byte_offset = (self.len_offset().1 + index) / TPB;
        (self.0.as_ptr() as *const i8).add(byte_offset)
    }

    fn len_offset(&self) -> (usize, usize) {
        (self.0.len() >> 3, self.0.len() & 0b111)
    }
}

fn extract(x: i8, elem: usize) -> Btrit {
    debug_assert!(
        elem < TPB,
        "Attempted to extract invalid element {} from balanced T5B1 trit",
        elem
    );
    Utrit::from_u8((((x as i16 + BAL as i16) / 3i16.pow(elem as u32)) % 3) as u8).shift()
}

fn insert(x: i8, elem: usize, trit: Btrit) -> i8 {
    debug_assert!(
        elem < TPB,
        "Attempted to insert invalid element {} into balanced T5B1 trit",
        elem
    );
    let utrit = trit.shift();
    let ux = x as i16 + BAL as i16;
    let ux = ux + (utrit.into_u8() as i16 - (ux / 3i16.pow(elem as u32)) % 3) * 3i16.pow(elem as u32);
    (ux - BAL as i16) as i8
}

impl RawEncoding for T5B1 {
    type Trit = Btrit;
    type Buf = T5B1Buf;

    fn empty() -> &'static Self {
        unsafe { &*Self::make(&[] as *const _, 0, 0) }
    }

    fn len(&self) -> usize {
        self.len_offset().0
    }

    fn as_i8_slice(&self) -> &[i8] {
        assert!(self.len_offset().1 == 0);
        unsafe { std::slice::from_raw_parts(self as *const _ as *const _, (self.len() + TPB - 1) / TPB) }
    }

    unsafe fn as_i8_slice_mut(&mut self) -> &mut [i8] {
        assert!(self.len_offset().1 == 0);
        std::slice::from_raw_parts_mut(self as *mut _ as *mut _, (self.len() + TPB - 1) / TPB)
    }

    unsafe fn get_unchecked(&self, index: usize) -> Self::Trit {
        let b = self.ptr(index).read();
        extract(b, (self.len_offset().1 + index) % TPB)
    }

    unsafe fn set_unchecked(&mut self, index: usize, trit: Self::Trit) {
        let b = self.ptr(index).read();
        let b = insert(b, (self.len_offset().1 + index) % TPB, trit);
        (self.ptr(index) as *mut i8).write(b);
    }

    unsafe fn slice_unchecked(&self, range: Range<usize>) -> &Self {
        &*Self::make(
            self.ptr(range.start),
            (self.len_offset().1 + range.start) % TPB,
            range.end - range.start,
        )
    }

    unsafe fn slice_unchecked_mut(&mut self, range: Range<usize>) -> &mut Self {
        &mut *(Self::make(
            self.ptr(range.start),
            (self.len_offset().1 + range.start) % TPB,
            range.end - range.start,
        ) as *mut Self)
    }

    fn is_valid(b: i8) -> bool {
        b >= -BAL && b <= BAL
    }

    unsafe fn from_raw_unchecked(b: &[i8], num_trits: usize) -> &Self {
        assert!(num_trits <= b.len() * TPB);
        &*Self::make(b.as_ptr() as *const _, 0, num_trits)
    }

    unsafe fn from_raw_unchecked_mut(b: &mut [i8], num_trits: usize) -> &mut Self {
        assert!(num_trits <= b.len() * TPB);
        &mut *(Self::make(b.as_ptr() as *const _, 0, num_trits) as *mut _)
    }
}

/// An encoding scheme buffer that uses a single byte to represent five trits.
#[derive(Clone)]
pub struct T5B1Buf(Vec<i8>, usize);

impl RawEncodingBuf for T5B1Buf {
    type Slice = T5B1;

    fn new() -> Self {
        Self(Vec::new(), 0)
    }

    fn with_capacity(cap: usize) -> Self {
        let cap = (cap / TPB) + (cap % TPB != 0) as usize;
        Self(Vec::with_capacity(cap), 0)
    }

    fn push(&mut self, trit: <Self::Slice as RawEncoding>::Trit) {
        if self.1 % TPB == 0 {
            self.0.push(insert(0, 0, trit));
        } else {
            let last_index = self.0.len() - 1;
            let b = unsafe { self.0.get_unchecked_mut(last_index) };
            *b = insert(*b, self.1 % TPB, trit);
        }
        self.1 += 1;
    }

    fn pop(&mut self) -> Option<<Self::Slice as RawEncoding>::Trit> {
        let val = if self.1 == 0 {
            return None;
        } else if self.1 % TPB == 1 {
            self.0.pop().map(|b| extract(b, 0))
        } else {
            let last_index = self.0.len() - 1;
            unsafe { Some(extract(*self.0.get_unchecked(last_index), (self.1 + TPB - 1) % TPB)) }
        };
        self.1 -= 1;
        val
    }

    fn as_slice(&self) -> &Self::Slice {
        unsafe { &*Self::Slice::make(self.0.as_ptr() as _, 0, self.1) }
    }

    fn as_slice_mut(&mut self) -> &mut Self::Slice {
        unsafe { &mut *(Self::Slice::make(self.0.as_ptr() as _, 0, self.1) as *mut _) }
    }
}
avg_line_length: 32.040462 | max_line_length: 118 | alphanum_fraction: 0.567021
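The `insert`/`extract` arithmetic in the record above is plain base-3 positional encoding: each balanced trit in {-1, 0, 1} is shifted to {0, 1, 2}, trit i contributes u_i * 3^i (five trits give 3^5 = 243 values), and subtracting BAL = 121 recenters the byte into -121..=121. A self-contained sketch of the same math, with the crate's trit types replaced by plain i8 values:

// Standalone illustration of the T5B1 byte math; mirrors insert/extract above.
const BAL: i16 = 121;

fn pack(trits: [i8; 5]) -> i8 {
    let mut unbalanced: i16 = 0;
    for (i, &t) in trits.iter().enumerate() {
        unbalanced += ((t + 1) as i16) * 3i16.pow(i as u32); // shift -1..1 to 0..2
    }
    (unbalanced - BAL) as i8 // recenter 0..242 to -121..121
}

fn unpack(byte: i8, elem: usize) -> i8 {
    (((byte as i16 + BAL) / 3i16.pow(elem as u32)) % 3 - 1) as i8
}

fn main() {
    let trits = [1, 0, -1, 1, 0];
    let b = pack(trits);
    // unbalanced digits [2,1,0,2,1] -> 2 + 3 + 0 + 54 + 81 = 140; 140 - 121 = 19
    assert_eq!(b, 19);
    for (i, &t) in trits.iter().enumerate() {
        assert_eq!(unpack(b, i), t);
    }
}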
hexsha: bf0be54d326d811a065ce20961d174cab477dc7b | size: 96,665
use super::{ BlockType, CountedList, CountedListWriter, Deserialize, Error, Serialize, Uint32, Uint64, Uint8, VarInt32, VarInt64, VarUint32, }; use crate::io; use alloc::{boxed::Box, vec::Vec}; use core::fmt; /// List of instructions (usually inside a block section). #[derive(Debug, Clone, PartialEq)] pub struct Instructions(Vec<Instruction>); impl Instructions { /// New list of instructions from vector of instructions. pub fn new(elements: Vec<Instruction>) -> Self { Instructions(elements) } /// Empty expression with only `Instruction::End` instruction. pub fn empty() -> Self { Instructions(vec![Instruction::End]) } /// List of individual instructions. pub fn elements(&self) -> &[Instruction] { &self.0 } /// Individual instructions, mutable. pub fn elements_mut(&mut self) -> &mut Vec<Instruction> { &mut self.0 } } impl Deserialize for Instructions { type Error = Error; fn deserialize<R: io::Read>(reader: &mut R) -> Result<Self, Self::Error> { let mut instructions = Vec::new(); let mut block_count = 1usize; loop { let instruction = Instruction::deserialize(reader)?; if instruction.is_terminal() { block_count -= 1; } else if instruction.is_block() { block_count = block_count.checked_add(1).ok_or(Error::Other("too many instructions"))?; } instructions.push(instruction); if block_count == 0 { break } } Ok(Instructions(instructions)) } } /// Initialization expression. #[derive(Debug, Clone, PartialEq)] pub struct InitExpr(Vec<Instruction>); impl InitExpr { /// New initialization expression from instruction list. /// /// `code` must end with the `Instruction::End` instruction! pub fn new(code: Vec<Instruction>) -> Self { InitExpr(code) } /// Empty expression with only `Instruction::End` instruction. pub fn empty() -> Self { InitExpr(vec![Instruction::End]) } /// List of instructions used in the expression. pub fn code(&self) -> &[Instruction] { &self.0 } /// List of instructions used in the expression. pub fn code_mut(&mut self) -> &mut Vec<Instruction> { &mut self.0 } } impl Deserialize for InitExpr { type Error = Error; fn deserialize<R: io::Read>(reader: &mut R) -> Result<Self, Self::Error> { let mut instructions = Vec::new(); loop { let instruction = Instruction::deserialize(reader)?; let is_terminal = instruction.is_terminal(); instructions.push(instruction); if is_terminal { break } } Ok(InitExpr(instructions)) } } /// Instruction. 
#[derive(Clone, Debug, PartialEq, Eq, Hash)] #[allow(missing_docs)] pub enum Instruction { Unreachable, Nop, Block(BlockType), Loop(BlockType), If(BlockType), Else, End, Br(u32), BrIf(u32), BrTable(Box<BrTableData>), Return, Call(u32), CallIndirect(u32, u8), Drop, Select, GetLocal(u32), SetLocal(u32), TeeLocal(u32), GetGlobal(u32), SetGlobal(u32), // All store/load instructions operate with 'memory immediates' // which represented here as (flag, offset) tuple I32Load(u32, u32), I64Load(u32, u32), F32Load(u32, u32), F64Load(u32, u32), I32Load8S(u32, u32), I32Load8U(u32, u32), I32Load16S(u32, u32), I32Load16U(u32, u32), I64Load8S(u32, u32), I64Load8U(u32, u32), I64Load16S(u32, u32), I64Load16U(u32, u32), I64Load32S(u32, u32), I64Load32U(u32, u32), I32Store(u32, u32), I64Store(u32, u32), F32Store(u32, u32), F64Store(u32, u32), I32Store8(u32, u32), I32Store16(u32, u32), I64Store8(u32, u32), I64Store16(u32, u32), I64Store32(u32, u32), CurrentMemory(u8), GrowMemory(u8), I32Const(i32), I64Const(i64), F32Const(u32), F64Const(u64), I32Eqz, I32Eq, I32Ne, I32LtS, I32LtU, I32GtS, I32GtU, I32LeS, I32LeU, I32GeS, I32GeU, I64Eqz, I64Eq, I64Ne, I64LtS, I64LtU, I64GtS, I64GtU, I64LeS, I64LeU, I64GeS, I64GeU, F32Eq, F32Ne, F32Lt, F32Gt, F32Le, F32Ge, F64Eq, F64Ne, F64Lt, F64Gt, F64Le, F64Ge, I32Clz, I32Ctz, I32Popcnt, I32Add, I32Sub, I32Mul, I32DivS, I32DivU, I32RemS, I32RemU, I32And, I32Or, I32Xor, I32Shl, I32ShrS, I32ShrU, I32Rotl, I32Rotr, I64Clz, I64Ctz, I64Popcnt, I64Add, I64Sub, I64Mul, I64DivS, I64DivU, I64RemS, I64RemU, I64And, I64Or, I64Xor, I64Shl, I64ShrS, I64ShrU, I64Rotl, I64Rotr, F32Abs, F32Neg, F32Ceil, F32Floor, F32Trunc, F32Nearest, F32Sqrt, F32Add, F32Sub, F32Mul, F32Div, F32Min, F32Max, F32Copysign, F64Abs, F64Neg, F64Ceil, F64Floor, F64Trunc, F64Nearest, F64Sqrt, F64Add, F64Sub, F64Mul, F64Div, F64Min, F64Max, F64Copysign, I32WrapI64, I32TruncSF32, I32TruncUF32, I32TruncSF64, I32TruncUF64, I64ExtendSI32, I64ExtendUI32, I64TruncSF32, I64TruncUF32, I64TruncSF64, I64TruncUF64, F32ConvertSI32, F32ConvertUI32, F32ConvertSI64, F32ConvertUI64, F32DemoteF64, F64ConvertSI32, F64ConvertUI32, F64ConvertSI64, F64ConvertUI64, F64PromoteF32, I32ReinterpretF32, I64ReinterpretF64, F32ReinterpretI32, F64ReinterpretI64, #[cfg(feature = "atomics")] Atomics(AtomicsInstruction), #[cfg(feature = "simd")] Simd(SimdInstruction), #[cfg(feature = "sign_ext")] SignExt(SignExtInstruction), #[cfg(feature = "bulk")] Bulk(BulkInstruction), #[cfg(feature = "tail_calls")] ReturnCall(u32), #[cfg(feature = "tail_calls")] ReturnCallIndirect(u32, u8), } #[allow(missing_docs)] #[cfg(feature = "atomics")] #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum AtomicsInstruction { AtomicWake(MemArg), I32AtomicWait(MemArg), I64AtomicWait(MemArg), I32AtomicLoad(MemArg), I64AtomicLoad(MemArg), I32AtomicLoad8u(MemArg), I32AtomicLoad16u(MemArg), I64AtomicLoad8u(MemArg), I64AtomicLoad16u(MemArg), I64AtomicLoad32u(MemArg), I32AtomicStore(MemArg), I64AtomicStore(MemArg), I32AtomicStore8u(MemArg), I32AtomicStore16u(MemArg), I64AtomicStore8u(MemArg), I64AtomicStore16u(MemArg), I64AtomicStore32u(MemArg), I32AtomicRmwAdd(MemArg), I64AtomicRmwAdd(MemArg), I32AtomicRmwAdd8u(MemArg), I32AtomicRmwAdd16u(MemArg), I64AtomicRmwAdd8u(MemArg), I64AtomicRmwAdd16u(MemArg), I64AtomicRmwAdd32u(MemArg), I32AtomicRmwSub(MemArg), I64AtomicRmwSub(MemArg), I32AtomicRmwSub8u(MemArg), I32AtomicRmwSub16u(MemArg), I64AtomicRmwSub8u(MemArg), I64AtomicRmwSub16u(MemArg), I64AtomicRmwSub32u(MemArg), I32AtomicRmwAnd(MemArg), I64AtomicRmwAnd(MemArg), 
I32AtomicRmwAnd8u(MemArg), I32AtomicRmwAnd16u(MemArg), I64AtomicRmwAnd8u(MemArg), I64AtomicRmwAnd16u(MemArg), I64AtomicRmwAnd32u(MemArg), I32AtomicRmwOr(MemArg), I64AtomicRmwOr(MemArg), I32AtomicRmwOr8u(MemArg), I32AtomicRmwOr16u(MemArg), I64AtomicRmwOr8u(MemArg), I64AtomicRmwOr16u(MemArg), I64AtomicRmwOr32u(MemArg), I32AtomicRmwXor(MemArg), I64AtomicRmwXor(MemArg), I32AtomicRmwXor8u(MemArg), I32AtomicRmwXor16u(MemArg), I64AtomicRmwXor8u(MemArg), I64AtomicRmwXor16u(MemArg), I64AtomicRmwXor32u(MemArg), I32AtomicRmwXchg(MemArg), I64AtomicRmwXchg(MemArg), I32AtomicRmwXchg8u(MemArg), I32AtomicRmwXchg16u(MemArg), I64AtomicRmwXchg8u(MemArg), I64AtomicRmwXchg16u(MemArg), I64AtomicRmwXchg32u(MemArg), I32AtomicRmwCmpxchg(MemArg), I64AtomicRmwCmpxchg(MemArg), I32AtomicRmwCmpxchg8u(MemArg), I32AtomicRmwCmpxchg16u(MemArg), I64AtomicRmwCmpxchg8u(MemArg), I64AtomicRmwCmpxchg16u(MemArg), I64AtomicRmwCmpxchg32u(MemArg), } #[allow(missing_docs)] #[cfg(feature = "simd")] #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum SimdInstruction { V128Const(Box<[u8; 16]>), V128Load(MemArg), V128Store(MemArg), I8x16Splat, I16x8Splat, I32x4Splat, I64x2Splat, F32x4Splat, F64x2Splat, I8x16ExtractLaneS(u8), I8x16ExtractLaneU(u8), I16x8ExtractLaneS(u8), I16x8ExtractLaneU(u8), I32x4ExtractLane(u8), I64x2ExtractLane(u8), F32x4ExtractLane(u8), F64x2ExtractLane(u8), I8x16ReplaceLane(u8), I16x8ReplaceLane(u8), I32x4ReplaceLane(u8), I64x2ReplaceLane(u8), F32x4ReplaceLane(u8), F64x2ReplaceLane(u8), V8x16Shuffle(Box<[u8; 16]>), I8x16Add, I16x8Add, I32x4Add, I64x2Add, I8x16Sub, I16x8Sub, I32x4Sub, I64x2Sub, I8x16Mul, I16x8Mul, I32x4Mul, // I64x2Mul, I8x16Neg, I16x8Neg, I32x4Neg, I64x2Neg, I8x16AddSaturateS, I8x16AddSaturateU, I16x8AddSaturateS, I16x8AddSaturateU, I8x16SubSaturateS, I8x16SubSaturateU, I16x8SubSaturateS, I16x8SubSaturateU, I8x16Shl, I16x8Shl, I32x4Shl, I64x2Shl, I8x16ShrS, I8x16ShrU, I16x8ShrS, I16x8ShrU, I32x4ShrS, I32x4ShrU, I64x2ShrS, I64x2ShrU, V128And, V128Or, V128Xor, V128Not, V128Bitselect, I8x16AnyTrue, I16x8AnyTrue, I32x4AnyTrue, I64x2AnyTrue, I8x16AllTrue, I16x8AllTrue, I32x4AllTrue, I64x2AllTrue, I8x16Eq, I16x8Eq, I32x4Eq, // I64x2Eq, F32x4Eq, F64x2Eq, I8x16Ne, I16x8Ne, I32x4Ne, // I64x2Ne, F32x4Ne, F64x2Ne, I8x16LtS, I8x16LtU, I16x8LtS, I16x8LtU, I32x4LtS, I32x4LtU, // I64x2LtS, // I64x2LtU, F32x4Lt, F64x2Lt, I8x16LeS, I8x16LeU, I16x8LeS, I16x8LeU, I32x4LeS, I32x4LeU, // I64x2LeS, // I64x2LeU, F32x4Le, F64x2Le, I8x16GtS, I8x16GtU, I16x8GtS, I16x8GtU, I32x4GtS, I32x4GtU, // I64x2GtS, // I64x2GtU, F32x4Gt, F64x2Gt, I8x16GeS, I8x16GeU, I16x8GeS, I16x8GeU, I32x4GeS, I32x4GeU, // I64x2GeS, // I64x2GeU, F32x4Ge, F64x2Ge, F32x4Neg, F64x2Neg, F32x4Abs, F64x2Abs, F32x4Min, F64x2Min, F32x4Max, F64x2Max, F32x4Add, F64x2Add, F32x4Sub, F64x2Sub, F32x4Div, F64x2Div, F32x4Mul, F64x2Mul, F32x4Sqrt, F64x2Sqrt, F32x4ConvertSI32x4, F32x4ConvertUI32x4, F64x2ConvertSI64x2, F64x2ConvertUI64x2, I32x4TruncSF32x4Sat, I32x4TruncUF32x4Sat, I64x2TruncSF64x2Sat, I64x2TruncUF64x2Sat, } #[allow(missing_docs)] #[cfg(feature = "sign_ext")] #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum SignExtInstruction { I32Extend8S, I32Extend16S, I64Extend8S, I64Extend16S, I64Extend32S, } #[allow(missing_docs)] #[cfg(feature = "bulk")] #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum BulkInstruction { MemoryInit(u32), MemoryDrop(u32), MemoryCopy, MemoryFill, TableInit(u32), TableDrop(u32), TableCopy, } #[cfg(any(feature = "simd", feature = "atomics"))] #[derive(Clone, Debug, PartialEq, Eq, Hash)] #[allow(missing_docs)] pub struct 
MemArg { pub align: u8, pub offset: u32, } #[derive(Clone, Debug, PartialEq, Eq, Hash)] #[allow(missing_docs)] pub struct BrTableData { pub table: Box<[u32]>, pub default: u32, } impl Instruction { /// Is this instruction starts the new block (which should end with terminal instruction). pub fn is_block(&self) -> bool { matches!(self, &Instruction::Block(_) | &Instruction::Loop(_) | &Instruction::If(_)) } /// Is this instruction determines the termination of instruction sequence? /// /// `true` for `Instruction::End` pub fn is_terminal(&self) -> bool { matches!(self, &Instruction::End) } } #[allow(missing_docs)] pub mod opcodes { pub const UNREACHABLE: u8 = 0x00; pub const NOP: u8 = 0x01; pub const BLOCK: u8 = 0x02; pub const LOOP: u8 = 0x03; pub const IF: u8 = 0x04; pub const ELSE: u8 = 0x05; pub const END: u8 = 0x0b; pub const BR: u8 = 0x0c; pub const BRIF: u8 = 0x0d; pub const BRTABLE: u8 = 0x0e; pub const RETURN: u8 = 0x0f; pub const CALL: u8 = 0x10; pub const CALLINDIRECT: u8 = 0x11; #[cfg(feature = "tail_calls")] pub const RETURNCALL: u8 = 0x12; #[cfg(feature = "tail_calls")] pub const RETURNCALLINDIRECT: u8 = 0x13; pub const DROP: u8 = 0x1a; pub const SELECT: u8 = 0x1b; pub const GETLOCAL: u8 = 0x20; pub const SETLOCAL: u8 = 0x21; pub const TEELOCAL: u8 = 0x22; pub const GETGLOBAL: u8 = 0x23; pub const SETGLOBAL: u8 = 0x24; pub const I32LOAD: u8 = 0x28; pub const I64LOAD: u8 = 0x29; pub const F32LOAD: u8 = 0x2a; pub const F64LOAD: u8 = 0x2b; pub const I32LOAD8S: u8 = 0x2c; pub const I32LOAD8U: u8 = 0x2d; pub const I32LOAD16S: u8 = 0x2e; pub const I32LOAD16U: u8 = 0x2f; pub const I64LOAD8S: u8 = 0x30; pub const I64LOAD8U: u8 = 0x31; pub const I64LOAD16S: u8 = 0x32; pub const I64LOAD16U: u8 = 0x33; pub const I64LOAD32S: u8 = 0x34; pub const I64LOAD32U: u8 = 0x35; pub const I32STORE: u8 = 0x36; pub const I64STORE: u8 = 0x37; pub const F32STORE: u8 = 0x38; pub const F64STORE: u8 = 0x39; pub const I32STORE8: u8 = 0x3a; pub const I32STORE16: u8 = 0x3b; pub const I64STORE8: u8 = 0x3c; pub const I64STORE16: u8 = 0x3d; pub const I64STORE32: u8 = 0x3e; pub const CURRENTMEMORY: u8 = 0x3f; pub const GROWMEMORY: u8 = 0x40; pub const I32CONST: u8 = 0x41; pub const I64CONST: u8 = 0x42; pub const F32CONST: u8 = 0x43; pub const F64CONST: u8 = 0x44; pub const I32EQZ: u8 = 0x45; pub const I32EQ: u8 = 0x46; pub const I32NE: u8 = 0x47; pub const I32LTS: u8 = 0x48; pub const I32LTU: u8 = 0x49; pub const I32GTS: u8 = 0x4a; pub const I32GTU: u8 = 0x4b; pub const I32LES: u8 = 0x4c; pub const I32LEU: u8 = 0x4d; pub const I32GES: u8 = 0x4e; pub const I32GEU: u8 = 0x4f; pub const I64EQZ: u8 = 0x50; pub const I64EQ: u8 = 0x51; pub const I64NE: u8 = 0x52; pub const I64LTS: u8 = 0x53; pub const I64LTU: u8 = 0x54; pub const I64GTS: u8 = 0x55; pub const I64GTU: u8 = 0x56; pub const I64LES: u8 = 0x57; pub const I64LEU: u8 = 0x58; pub const I64GES: u8 = 0x59; pub const I64GEU: u8 = 0x5a; pub const F32EQ: u8 = 0x5b; pub const F32NE: u8 = 0x5c; pub const F32LT: u8 = 0x5d; pub const F32GT: u8 = 0x5e; pub const F32LE: u8 = 0x5f; pub const F32GE: u8 = 0x60; pub const F64EQ: u8 = 0x61; pub const F64NE: u8 = 0x62; pub const F64LT: u8 = 0x63; pub const F64GT: u8 = 0x64; pub const F64LE: u8 = 0x65; pub const F64GE: u8 = 0x66; pub const I32CLZ: u8 = 0x67; pub const I32CTZ: u8 = 0x68; pub const I32POPCNT: u8 = 0x69; pub const I32ADD: u8 = 0x6a; pub const I32SUB: u8 = 0x6b; pub const I32MUL: u8 = 0x6c; pub const I32DIVS: u8 = 0x6d; pub const I32DIVU: u8 = 0x6e; pub const I32REMS: u8 = 0x6f; pub const I32REMU: u8 = 0x70; pub 
const I32AND: u8 = 0x71; pub const I32OR: u8 = 0x72; pub const I32XOR: u8 = 0x73; pub const I32SHL: u8 = 0x74; pub const I32SHRS: u8 = 0x75; pub const I32SHRU: u8 = 0x76; pub const I32ROTL: u8 = 0x77; pub const I32ROTR: u8 = 0x78; pub const I64CLZ: u8 = 0x79; pub const I64CTZ: u8 = 0x7a; pub const I64POPCNT: u8 = 0x7b; pub const I64ADD: u8 = 0x7c; pub const I64SUB: u8 = 0x7d; pub const I64MUL: u8 = 0x7e; pub const I64DIVS: u8 = 0x7f; pub const I64DIVU: u8 = 0x80; pub const I64REMS: u8 = 0x81; pub const I64REMU: u8 = 0x82; pub const I64AND: u8 = 0x83; pub const I64OR: u8 = 0x84; pub const I64XOR: u8 = 0x85; pub const I64SHL: u8 = 0x86; pub const I64SHRS: u8 = 0x87; pub const I64SHRU: u8 = 0x88; pub const I64ROTL: u8 = 0x89; pub const I64ROTR: u8 = 0x8a; pub const F32ABS: u8 = 0x8b; pub const F32NEG: u8 = 0x8c; pub const F32CEIL: u8 = 0x8d; pub const F32FLOOR: u8 = 0x8e; pub const F32TRUNC: u8 = 0x8f; pub const F32NEAREST: u8 = 0x90; pub const F32SQRT: u8 = 0x91; pub const F32ADD: u8 = 0x92; pub const F32SUB: u8 = 0x93; pub const F32MUL: u8 = 0x94; pub const F32DIV: u8 = 0x95; pub const F32MIN: u8 = 0x96; pub const F32MAX: u8 = 0x97; pub const F32COPYSIGN: u8 = 0x98; pub const F64ABS: u8 = 0x99; pub const F64NEG: u8 = 0x9a; pub const F64CEIL: u8 = 0x9b; pub const F64FLOOR: u8 = 0x9c; pub const F64TRUNC: u8 = 0x9d; pub const F64NEAREST: u8 = 0x9e; pub const F64SQRT: u8 = 0x9f; pub const F64ADD: u8 = 0xa0; pub const F64SUB: u8 = 0xa1; pub const F64MUL: u8 = 0xa2; pub const F64DIV: u8 = 0xa3; pub const F64MIN: u8 = 0xa4; pub const F64MAX: u8 = 0xa5; pub const F64COPYSIGN: u8 = 0xa6; pub const I32WRAPI64: u8 = 0xa7; pub const I32TRUNCSF32: u8 = 0xa8; pub const I32TRUNCUF32: u8 = 0xa9; pub const I32TRUNCSF64: u8 = 0xaa; pub const I32TRUNCUF64: u8 = 0xab; pub const I64EXTENDSI32: u8 = 0xac; pub const I64EXTENDUI32: u8 = 0xad; pub const I64TRUNCSF32: u8 = 0xae; pub const I64TRUNCUF32: u8 = 0xaf; pub const I64TRUNCSF64: u8 = 0xb0; pub const I64TRUNCUF64: u8 = 0xb1; pub const F32CONVERTSI32: u8 = 0xb2; pub const F32CONVERTUI32: u8 = 0xb3; pub const F32CONVERTSI64: u8 = 0xb4; pub const F32CONVERTUI64: u8 = 0xb5; pub const F32DEMOTEF64: u8 = 0xb6; pub const F64CONVERTSI32: u8 = 0xb7; pub const F64CONVERTUI32: u8 = 0xb8; pub const F64CONVERTSI64: u8 = 0xb9; pub const F64CONVERTUI64: u8 = 0xba; pub const F64PROMOTEF32: u8 = 0xbb; pub const I32REINTERPRETF32: u8 = 0xbc; pub const I64REINTERPRETF64: u8 = 0xbd; pub const F32REINTERPRETI32: u8 = 0xbe; pub const F64REINTERPRETI64: u8 = 0xbf; #[cfg(feature = "sign_ext")] pub mod sign_ext { pub const I32_EXTEND8_S: u8 = 0xc0; pub const I32_EXTEND16_S: u8 = 0xc1; pub const I64_EXTEND8_S: u8 = 0xc2; pub const I64_EXTEND16_S: u8 = 0xc3; pub const I64_EXTEND32_S: u8 = 0xc4; } #[cfg(feature = "atomics")] pub mod atomics { pub const ATOMIC_PREFIX: u8 = 0xfe; pub const ATOMIC_WAKE: u8 = 0x00; pub const I32_ATOMIC_WAIT: u8 = 0x01; pub const I64_ATOMIC_WAIT: u8 = 0x02; pub const I32_ATOMIC_LOAD: u8 = 0x10; pub const I64_ATOMIC_LOAD: u8 = 0x11; pub const I32_ATOMIC_LOAD8U: u8 = 0x12; pub const I32_ATOMIC_LOAD16U: u8 = 0x13; pub const I64_ATOMIC_LOAD8U: u8 = 0x14; pub const I64_ATOMIC_LOAD16U: u8 = 0x15; pub const I64_ATOMIC_LOAD32U: u8 = 0x16; pub const I32_ATOMIC_STORE: u8 = 0x17; pub const I64_ATOMIC_STORE: u8 = 0x18; pub const I32_ATOMIC_STORE8U: u8 = 0x19; pub const I32_ATOMIC_STORE16U: u8 = 0x1a; pub const I64_ATOMIC_STORE8U: u8 = 0x1b; pub const I64_ATOMIC_STORE16U: u8 = 0x1c; pub const I64_ATOMIC_STORE32U: u8 = 0x1d; pub const I32_ATOMIC_RMW_ADD: u8 = 0x1e; pub 
const I64_ATOMIC_RMW_ADD: u8 = 0x1f; pub const I32_ATOMIC_RMW_ADD8U: u8 = 0x20; pub const I32_ATOMIC_RMW_ADD16U: u8 = 0x21; pub const I64_ATOMIC_RMW_ADD8U: u8 = 0x22; pub const I64_ATOMIC_RMW_ADD16U: u8 = 0x23; pub const I64_ATOMIC_RMW_ADD32U: u8 = 0x24; pub const I32_ATOMIC_RMW_SUB: u8 = 0x25; pub const I64_ATOMIC_RMW_SUB: u8 = 0x26; pub const I32_ATOMIC_RMW_SUB8U: u8 = 0x27; pub const I32_ATOMIC_RMW_SUB16U: u8 = 0x28; pub const I64_ATOMIC_RMW_SUB8U: u8 = 0x29; pub const I64_ATOMIC_RMW_SUB16U: u8 = 0x2a; pub const I64_ATOMIC_RMW_SUB32U: u8 = 0x2b; pub const I32_ATOMIC_RMW_AND: u8 = 0x2c; pub const I64_ATOMIC_RMW_AND: u8 = 0x2d; pub const I32_ATOMIC_RMW_AND8U: u8 = 0x2e; pub const I32_ATOMIC_RMW_AND16U: u8 = 0x2f; pub const I64_ATOMIC_RMW_AND8U: u8 = 0x30; pub const I64_ATOMIC_RMW_AND16U: u8 = 0x31; pub const I64_ATOMIC_RMW_AND32U: u8 = 0x32; pub const I32_ATOMIC_RMW_OR: u8 = 0x33; pub const I64_ATOMIC_RMW_OR: u8 = 0x34; pub const I32_ATOMIC_RMW_OR8U: u8 = 0x35; pub const I32_ATOMIC_RMW_OR16U: u8 = 0x36; pub const I64_ATOMIC_RMW_OR8U: u8 = 0x37; pub const I64_ATOMIC_RMW_OR16U: u8 = 0x38; pub const I64_ATOMIC_RMW_OR32U: u8 = 0x39; pub const I32_ATOMIC_RMW_XOR: u8 = 0x3a; pub const I64_ATOMIC_RMW_XOR: u8 = 0x3b; pub const I32_ATOMIC_RMW_XOR8U: u8 = 0x3c; pub const I32_ATOMIC_RMW_XOR16U: u8 = 0x3d; pub const I64_ATOMIC_RMW_XOR8U: u8 = 0x3e; pub const I64_ATOMIC_RMW_XOR16U: u8 = 0x3f; pub const I64_ATOMIC_RMW_XOR32U: u8 = 0x40; pub const I32_ATOMIC_RMW_XCHG: u8 = 0x41; pub const I64_ATOMIC_RMW_XCHG: u8 = 0x42; pub const I32_ATOMIC_RMW_XCHG8U: u8 = 0x43; pub const I32_ATOMIC_RMW_XCHG16U: u8 = 0x44; pub const I64_ATOMIC_RMW_XCHG8U: u8 = 0x45; pub const I64_ATOMIC_RMW_XCHG16U: u8 = 0x46; pub const I64_ATOMIC_RMW_XCHG32U: u8 = 0x47; pub const I32_ATOMIC_RMW_CMPXCHG: u8 = 0x48; pub const I64_ATOMIC_RMW_CMPXCHG: u8 = 0x49; pub const I32_ATOMIC_RMW_CMPXCHG8U: u8 = 0x4a; pub const I32_ATOMIC_RMW_CMPXCHG16U: u8 = 0x4b; pub const I64_ATOMIC_RMW_CMPXCHG8U: u8 = 0x4c; pub const I64_ATOMIC_RMW_CMPXCHG16U: u8 = 0x4d; pub const I64_ATOMIC_RMW_CMPXCHG32U: u8 = 0x4e; } #[cfg(feature = "simd")] pub mod simd { // https://github.com/WebAssembly/simd/blob/master/proposals/simd/BinarySIMD.md pub const SIMD_PREFIX: u8 = 0xfd; pub const V128_LOAD: u32 = 0x00; pub const V128_STORE: u32 = 0x01; pub const V128_CONST: u32 = 0x02; pub const V8X16_SHUFFLE: u32 = 0x03; pub const I8X16_SPLAT: u32 = 0x04; pub const I8X16_EXTRACT_LANE_S: u32 = 0x05; pub const I8X16_EXTRACT_LANE_U: u32 = 0x06; pub const I8X16_REPLACE_LANE: u32 = 0x07; pub const I16X8_SPLAT: u32 = 0x08; pub const I16X8_EXTRACT_LANE_S: u32 = 0x09; pub const I16X8_EXTRACT_LANE_U: u32 = 0xa; pub const I16X8_REPLACE_LANE: u32 = 0x0b; pub const I32X4_SPLAT: u32 = 0x0c; pub const I32X4_EXTRACT_LANE: u32 = 0x0d; pub const I32X4_REPLACE_LANE: u32 = 0x0e; pub const I64X2_SPLAT: u32 = 0x0f; pub const I64X2_EXTRACT_LANE: u32 = 0x10; pub const I64X2_REPLACE_LANE: u32 = 0x11; pub const F32X4_SPLAT: u32 = 0x12; pub const F32X4_EXTRACT_LANE: u32 = 0x13; pub const F32X4_REPLACE_LANE: u32 = 0x14; pub const F64X2_SPLAT: u32 = 0x15; pub const F64X2_EXTRACT_LANE: u32 = 0x16; pub const F64X2_REPLACE_LANE: u32 = 0x17; pub const I8X16_EQ: u32 = 0x18; pub const I8X16_NE: u32 = 0x19; pub const I8X16_LT_S: u32 = 0x1a; pub const I8X16_LT_U: u32 = 0x1b; pub const I8X16_GT_S: u32 = 0x1c; pub const I8X16_GT_U: u32 = 0x1d; pub const I8X16_LE_S: u32 = 0x1e; pub const I8X16_LE_U: u32 = 0x1f; pub const I8X16_GE_S: u32 = 0x20; pub const I8X16_GE_U: u32 = 0x21; pub const I16X8_EQ: u32 = 0x22; pub 
const I16X8_NE: u32 = 0x23; pub const I16X8_LT_S: u32 = 0x24; pub const I16X8_LT_U: u32 = 0x25; pub const I16X8_GT_S: u32 = 0x26; pub const I16X8_GT_U: u32 = 0x27; pub const I16X8_LE_S: u32 = 0x28; pub const I16X8_LE_U: u32 = 0x29; pub const I16X8_GE_S: u32 = 0x2a; pub const I16X8_GE_U: u32 = 0x2b; pub const I32X4_EQ: u32 = 0x2c; pub const I32X4_NE: u32 = 0x2d; pub const I32X4_LT_S: u32 = 0x2e; pub const I32X4_LT_U: u32 = 0x2f; pub const I32X4_GT_S: u32 = 0x30; pub const I32X4_GT_U: u32 = 0x31; pub const I32X4_LE_S: u32 = 0x32; pub const I32X4_LE_U: u32 = 0x33; pub const I32X4_GE_S: u32 = 0x34; pub const I32X4_GE_U: u32 = 0x35; pub const F32X4_EQ: u32 = 0x40; pub const F32X4_NE: u32 = 0x41; pub const F32X4_LT: u32 = 0x42; pub const F32X4_GT: u32 = 0x43; pub const F32X4_LE: u32 = 0x44; pub const F32X4_GE: u32 = 0x45; pub const F64X2_EQ: u32 = 0x46; pub const F64X2_NE: u32 = 0x47; pub const F64X2_LT: u32 = 0x48; pub const F64X2_GT: u32 = 0x49; pub const F64X2_LE: u32 = 0x4a; pub const F64X2_GE: u32 = 0x4b; pub const V128_NOT: u32 = 0x4c; pub const V128_AND: u32 = 0x4d; pub const V128_OR: u32 = 0x4e; pub const V128_XOR: u32 = 0x4f; pub const V128_BITSELECT: u32 = 0x50; pub const I8X16_NEG: u32 = 0x51; pub const I8X16_ANY_TRUE: u32 = 0x52; pub const I8X16_ALL_TRUE: u32 = 0x53; pub const I8X16_SHL: u32 = 0x54; pub const I8X16_SHR_S: u32 = 0x55; pub const I8X16_SHR_U: u32 = 0x56; pub const I8X16_ADD: u32 = 0x57; pub const I8X16_ADD_SATURATE_S: u32 = 0x58; pub const I8X16_ADD_SATURATE_U: u32 = 0x59; pub const I8X16_SUB: u32 = 0x5a; pub const I8X16_SUB_SATURATE_S: u32 = 0x5b; pub const I8X16_SUB_SATURATE_U: u32 = 0x5c; pub const I8X16_MUL: u32 = 0x5d; pub const I16X8_NEG: u32 = 0x62; pub const I16X8_ANY_TRUE: u32 = 0x63; pub const I16X8_ALL_TRUE: u32 = 0x64; pub const I16X8_SHL: u32 = 0x65; pub const I16X8_SHR_S: u32 = 0x66; pub const I16X8_SHR_U: u32 = 0x67; pub const I16X8_ADD: u32 = 0x68; pub const I16X8_ADD_SATURATE_S: u32 = 0x69; pub const I16X8_ADD_SATURATE_U: u32 = 0x6a; pub const I16X8_SUB: u32 = 0x6b; pub const I16X8_SUB_SATURATE_S: u32 = 0x6c; pub const I16X8_SUB_SATURATE_U: u32 = 0x6d; pub const I16X8_MUL: u32 = 0x6e; pub const I32X4_NEG: u32 = 0x73; pub const I32X4_ANY_TRUE: u32 = 0x74; pub const I32X4_ALL_TRUE: u32 = 0x75; pub const I32X4_SHL: u32 = 0x76; pub const I32X4_SHR_S: u32 = 0x77; pub const I32X4_SHR_U: u32 = 0x78; pub const I32X4_ADD: u32 = 0x79; pub const I32X4_ADD_SATURATE_S: u32 = 0x7a; pub const I32X4_ADD_SATURATE_U: u32 = 0x7b; pub const I32X4_SUB: u32 = 0x7c; pub const I32X4_SUB_SATURATE_S: u32 = 0x7d; pub const I32X4_SUB_SATURATE_U: u32 = 0x7e; pub const I32X4_MUL: u32 = 0x7f; pub const I64X2_NEG: u32 = 0x84; pub const I64X2_ANY_TRUE: u32 = 0x85; pub const I64X2_ALL_TRUE: u32 = 0x86; pub const I64X2_SHL: u32 = 0x87; pub const I64X2_SHR_S: u32 = 0x88; pub const I64X2_SHR_U: u32 = 0x89; pub const I64X2_ADD: u32 = 0x8a; pub const I64X2_SUB: u32 = 0x8d; pub const F32X4_ABS: u32 = 0x95; pub const F32X4_NEG: u32 = 0x96; pub const F32X4_SQRT: u32 = 0x97; pub const F32X4_ADD: u32 = 0x9a; pub const F32X4_SUB: u32 = 0x9b; pub const F32X4_MUL: u32 = 0x9c; pub const F32X4_DIV: u32 = 0x9d; pub const F32X4_MIN: u32 = 0x9e; pub const F32X4_MAX: u32 = 0x9f; pub const F64X2_ABS: u32 = 0xa0; pub const F64X2_NEG: u32 = 0xa1; pub const F64X2_SQRT: u32 = 0xa2; pub const F64X2_ADD: u32 = 0xa5; pub const F64X2_SUB: u32 = 0xa6; pub const F64X2_MUL: u32 = 0xa7; pub const F64X2_DIV: u32 = 0xa8; pub const F64X2_MIN: u32 = 0xa9; pub const F64X2_MAX: u32 = 0xaa; pub const I32X4_TRUNC_S_F32X4_SAT: 
u32 = 0xab; pub const I32X4_TRUNC_U_F32X4_SAT: u32 = 0xac; pub const I64X2_TRUNC_S_F64X2_SAT: u32 = 0xad; pub const I64X2_TRUNC_U_F64X2_SAT: u32 = 0xae; pub const F32X4_CONVERT_S_I32X4: u32 = 0xaf; pub const F32X4_CONVERT_U_I32X4: u32 = 0xb0; pub const F64X2_CONVERT_S_I64X2: u32 = 0xb1; pub const F64X2_CONVERT_U_I64X2: u32 = 0xb2; } #[cfg(feature = "bulk")] pub mod bulk { pub const BULK_PREFIX: u8 = 0xfc; pub const MEMORY_INIT: u8 = 0x08; pub const MEMORY_DROP: u8 = 0x09; pub const MEMORY_COPY: u8 = 0x0a; pub const MEMORY_FILL: u8 = 0x0b; pub const TABLE_INIT: u8 = 0x0c; pub const TABLE_DROP: u8 = 0x0d; pub const TABLE_COPY: u8 = 0x0e; } } impl Deserialize for Instruction { type Error = Error; fn deserialize<R: io::Read>(reader: &mut R) -> Result<Self, Self::Error> { use self::{opcodes::*, Instruction::*}; #[cfg(feature = "sign_ext")] use self::opcodes::sign_ext::*; let val: u8 = Uint8::deserialize(reader)?.into(); Ok(match val { UNREACHABLE => Unreachable, NOP => Nop, BLOCK => Block(BlockType::deserialize(reader)?), LOOP => Loop(BlockType::deserialize(reader)?), IF => If(BlockType::deserialize(reader)?), ELSE => Else, END => End, BR => Br(VarUint32::deserialize(reader)?.into()), BRIF => BrIf(VarUint32::deserialize(reader)?.into()), BRTABLE => { let t1: Vec<u32> = CountedList::<VarUint32>::deserialize(reader)? .into_inner() .into_iter() .map(Into::into) .collect(); BrTable(Box::new(BrTableData { table: t1.into_boxed_slice(), default: VarUint32::deserialize(reader)?.into(), })) }, RETURN => Return, CALL => Call(VarUint32::deserialize(reader)?.into()), CALLINDIRECT => { let signature: u32 = VarUint32::deserialize(reader)?.into(); let table_ref: u8 = Uint8::deserialize(reader)?.into(); if table_ref != 0 { return Err(Error::InvalidTableReference(table_ref)) } CallIndirect(signature, table_ref) }, DROP => Drop, SELECT => Select, GETLOCAL => GetLocal(VarUint32::deserialize(reader)?.into()), SETLOCAL => SetLocal(VarUint32::deserialize(reader)?.into()), TEELOCAL => TeeLocal(VarUint32::deserialize(reader)?.into()), GETGLOBAL => GetGlobal(VarUint32::deserialize(reader)?.into()), SETGLOBAL => SetGlobal(VarUint32::deserialize(reader)?.into()), I32LOAD => I32Load( VarUint32::deserialize(reader)?.into(), VarUint32::deserialize(reader)?.into(), ), I64LOAD => I64Load( VarUint32::deserialize(reader)?.into(), VarUint32::deserialize(reader)?.into(), ), F32LOAD => F32Load( VarUint32::deserialize(reader)?.into(), VarUint32::deserialize(reader)?.into(), ), F64LOAD => F64Load( VarUint32::deserialize(reader)?.into(), VarUint32::deserialize(reader)?.into(), ), I32LOAD8S => I32Load8S( VarUint32::deserialize(reader)?.into(), VarUint32::deserialize(reader)?.into(), ), I32LOAD8U => I32Load8U( VarUint32::deserialize(reader)?.into(), VarUint32::deserialize(reader)?.into(), ), I32LOAD16S => I32Load16S( VarUint32::deserialize(reader)?.into(), VarUint32::deserialize(reader)?.into(), ), I32LOAD16U => I32Load16U( VarUint32::deserialize(reader)?.into(), VarUint32::deserialize(reader)?.into(), ), I64LOAD8S => I64Load8S( VarUint32::deserialize(reader)?.into(), VarUint32::deserialize(reader)?.into(), ), I64LOAD8U => I64Load8U( VarUint32::deserialize(reader)?.into(), VarUint32::deserialize(reader)?.into(), ), I64LOAD16S => I64Load16S( VarUint32::deserialize(reader)?.into(), VarUint32::deserialize(reader)?.into(), ), I64LOAD16U => I64Load16U( VarUint32::deserialize(reader)?.into(), VarUint32::deserialize(reader)?.into(), ), I64LOAD32S => I64Load32S( VarUint32::deserialize(reader)?.into(), VarUint32::deserialize(reader)?.into(), ), 
I64LOAD32U => I64Load32U( VarUint32::deserialize(reader)?.into(), VarUint32::deserialize(reader)?.into(), ), I32STORE => I32Store( VarUint32::deserialize(reader)?.into(), VarUint32::deserialize(reader)?.into(), ), I64STORE => I64Store( VarUint32::deserialize(reader)?.into(), VarUint32::deserialize(reader)?.into(), ), F32STORE => F32Store( VarUint32::deserialize(reader)?.into(), VarUint32::deserialize(reader)?.into(), ), F64STORE => F64Store( VarUint32::deserialize(reader)?.into(), VarUint32::deserialize(reader)?.into(), ), I32STORE8 => I32Store8( VarUint32::deserialize(reader)?.into(), VarUint32::deserialize(reader)?.into(), ), I32STORE16 => I32Store16( VarUint32::deserialize(reader)?.into(), VarUint32::deserialize(reader)?.into(), ), I64STORE8 => I64Store8( VarUint32::deserialize(reader)?.into(), VarUint32::deserialize(reader)?.into(), ), I64STORE16 => I64Store16( VarUint32::deserialize(reader)?.into(), VarUint32::deserialize(reader)?.into(), ), I64STORE32 => I64Store32( VarUint32::deserialize(reader)?.into(), VarUint32::deserialize(reader)?.into(), ), CURRENTMEMORY => { let mem_ref: u8 = Uint8::deserialize(reader)?.into(); if mem_ref != 0 { return Err(Error::InvalidMemoryReference(mem_ref)) } CurrentMemory(mem_ref) }, GROWMEMORY => { let mem_ref: u8 = Uint8::deserialize(reader)?.into(); if mem_ref != 0 { return Err(Error::InvalidMemoryReference(mem_ref)) } GrowMemory(mem_ref) }, I32CONST => I32Const(VarInt32::deserialize(reader)?.into()), I64CONST => I64Const(VarInt64::deserialize(reader)?.into()), F32CONST => F32Const(Uint32::deserialize(reader)?.into()), F64CONST => F64Const(Uint64::deserialize(reader)?.into()), I32EQZ => I32Eqz, I32EQ => I32Eq, I32NE => I32Ne, I32LTS => I32LtS, I32LTU => I32LtU, I32GTS => I32GtS, I32GTU => I32GtU, I32LES => I32LeS, I32LEU => I32LeU, I32GES => I32GeS, I32GEU => I32GeU, I64EQZ => I64Eqz, I64EQ => I64Eq, I64NE => I64Ne, I64LTS => I64LtS, I64LTU => I64LtU, I64GTS => I64GtS, I64GTU => I64GtU, I64LES => I64LeS, I64LEU => I64LeU, I64GES => I64GeS, I64GEU => I64GeU, F32EQ => F32Eq, F32NE => F32Ne, F32LT => F32Lt, F32GT => F32Gt, F32LE => F32Le, F32GE => F32Ge, F64EQ => F64Eq, F64NE => F64Ne, F64LT => F64Lt, F64GT => F64Gt, F64LE => F64Le, F64GE => F64Ge, I32CLZ => I32Clz, I32CTZ => I32Ctz, I32POPCNT => I32Popcnt, I32ADD => I32Add, I32SUB => I32Sub, I32MUL => I32Mul, I32DIVS => I32DivS, I32DIVU => I32DivU, I32REMS => I32RemS, I32REMU => I32RemU, I32AND => I32And, I32OR => I32Or, I32XOR => I32Xor, I32SHL => I32Shl, I32SHRS => I32ShrS, I32SHRU => I32ShrU, I32ROTL => I32Rotl, I32ROTR => I32Rotr, I64CLZ => I64Clz, I64CTZ => I64Ctz, I64POPCNT => I64Popcnt, I64ADD => I64Add, I64SUB => I64Sub, I64MUL => I64Mul, I64DIVS => I64DivS, I64DIVU => I64DivU, I64REMS => I64RemS, I64REMU => I64RemU, I64AND => I64And, I64OR => I64Or, I64XOR => I64Xor, I64SHL => I64Shl, I64SHRS => I64ShrS, I64SHRU => I64ShrU, I64ROTL => I64Rotl, I64ROTR => I64Rotr, F32ABS => F32Abs, F32NEG => F32Neg, F32CEIL => F32Ceil, F32FLOOR => F32Floor, F32TRUNC => F32Trunc, F32NEAREST => F32Nearest, F32SQRT => F32Sqrt, F32ADD => F32Add, F32SUB => F32Sub, F32MUL => F32Mul, F32DIV => F32Div, F32MIN => F32Min, F32MAX => F32Max, F32COPYSIGN => F32Copysign, F64ABS => F64Abs, F64NEG => F64Neg, F64CEIL => F64Ceil, F64FLOOR => F64Floor, F64TRUNC => F64Trunc, F64NEAREST => F64Nearest, F64SQRT => F64Sqrt, F64ADD => F64Add, F64SUB => F64Sub, F64MUL => F64Mul, F64DIV => F64Div, F64MIN => F64Min, F64MAX => F64Max, F64COPYSIGN => F64Copysign, I32WRAPI64 => I32WrapI64, I32TRUNCSF32 => I32TruncSF32, I32TRUNCUF32 => 
I32TruncUF32, I32TRUNCSF64 => I32TruncSF64, I32TRUNCUF64 => I32TruncUF64, I64EXTENDSI32 => I64ExtendSI32, I64EXTENDUI32 => I64ExtendUI32, I64TRUNCSF32 => I64TruncSF32, I64TRUNCUF32 => I64TruncUF32, I64TRUNCSF64 => I64TruncSF64, I64TRUNCUF64 => I64TruncUF64, F32CONVERTSI32 => F32ConvertSI32, F32CONVERTUI32 => F32ConvertUI32, F32CONVERTSI64 => F32ConvertSI64, F32CONVERTUI64 => F32ConvertUI64, F32DEMOTEF64 => F32DemoteF64, F64CONVERTSI32 => F64ConvertSI32, F64CONVERTUI32 => F64ConvertUI32, F64CONVERTSI64 => F64ConvertSI64, F64CONVERTUI64 => F64ConvertUI64, F64PROMOTEF32 => F64PromoteF32, I32REINTERPRETF32 => I32ReinterpretF32, I64REINTERPRETF64 => I64ReinterpretF64, F32REINTERPRETI32 => F32ReinterpretI32, F64REINTERPRETI64 => F64ReinterpretI64, #[cfg(feature = "sign_ext")] I32_EXTEND8_S | I32_EXTEND16_S | I64_EXTEND8_S | I64_EXTEND16_S | I64_EXTEND32_S => match val { I32_EXTEND8_S => SignExt(SignExtInstruction::I32Extend8S), I32_EXTEND16_S => SignExt(SignExtInstruction::I32Extend16S), I64_EXTEND8_S => SignExt(SignExtInstruction::I64Extend8S), I64_EXTEND16_S => SignExt(SignExtInstruction::I64Extend16S), I64_EXTEND32_S => SignExt(SignExtInstruction::I64Extend32S), _ => return Err(Error::UnknownOpcode(val)), }, #[cfg(feature = "atomics")] atomics::ATOMIC_PREFIX => return deserialize_atomic(reader), #[cfg(feature = "simd")] simd::SIMD_PREFIX => return deserialize_simd(reader), #[cfg(feature = "bulk")] bulk::BULK_PREFIX => return deserialize_bulk(reader), #[cfg(feature = "tail_calls")] RETURNCALL => ReturnCall(VarUint32::deserialize(reader)?.into()), #[cfg(feature = "tail_calls")] RETURNCALLINDIRECT => { let signature: u32 = VarUint32::deserialize(reader)?.into(); let table_ref: u8 = Uint8::deserialize(reader)?.into(); if table_ref != 0 { return Err(Error::InvalidTableReference(table_ref)); } ReturnCallIndirect(signature, table_ref) } _ => return Err(Error::UnknownOpcode(val)), }) } } #[cfg(feature = "atomics")] fn deserialize_atomic<R: io::Read>(reader: &mut R) -> Result<Instruction, Error> { use self::{opcodes::atomics::*, AtomicsInstruction::*}; let val: u8 = Uint8::deserialize(reader)?.into(); let mem = MemArg::deserialize(reader)?; Ok(Instruction::Atomics(match val { ATOMIC_WAKE => AtomicWake(mem), I32_ATOMIC_WAIT => I32AtomicWait(mem), I64_ATOMIC_WAIT => I64AtomicWait(mem), I32_ATOMIC_LOAD => I32AtomicLoad(mem), I64_ATOMIC_LOAD => I64AtomicLoad(mem), I32_ATOMIC_LOAD8U => I32AtomicLoad8u(mem), I32_ATOMIC_LOAD16U => I32AtomicLoad16u(mem), I64_ATOMIC_LOAD8U => I64AtomicLoad8u(mem), I64_ATOMIC_LOAD16U => I64AtomicLoad16u(mem), I64_ATOMIC_LOAD32U => I64AtomicLoad32u(mem), I32_ATOMIC_STORE => I32AtomicStore(mem), I64_ATOMIC_STORE => I64AtomicStore(mem), I32_ATOMIC_STORE8U => I32AtomicStore8u(mem), I32_ATOMIC_STORE16U => I32AtomicStore16u(mem), I64_ATOMIC_STORE8U => I64AtomicStore8u(mem), I64_ATOMIC_STORE16U => I64AtomicStore16u(mem), I64_ATOMIC_STORE32U => I64AtomicStore32u(mem), I32_ATOMIC_RMW_ADD => I32AtomicRmwAdd(mem), I64_ATOMIC_RMW_ADD => I64AtomicRmwAdd(mem), I32_ATOMIC_RMW_ADD8U => I32AtomicRmwAdd8u(mem), I32_ATOMIC_RMW_ADD16U => I32AtomicRmwAdd16u(mem), I64_ATOMIC_RMW_ADD8U => I64AtomicRmwAdd8u(mem), I64_ATOMIC_RMW_ADD16U => I64AtomicRmwAdd16u(mem), I64_ATOMIC_RMW_ADD32U => I64AtomicRmwAdd32u(mem), I32_ATOMIC_RMW_SUB => I32AtomicRmwSub(mem), I64_ATOMIC_RMW_SUB => I64AtomicRmwSub(mem), I32_ATOMIC_RMW_SUB8U => I32AtomicRmwSub8u(mem), I32_ATOMIC_RMW_SUB16U => I32AtomicRmwSub16u(mem), I64_ATOMIC_RMW_SUB8U => I64AtomicRmwSub8u(mem), I64_ATOMIC_RMW_SUB16U => I64AtomicRmwSub16u(mem), 
I64_ATOMIC_RMW_SUB32U => I64AtomicRmwSub32u(mem), I32_ATOMIC_RMW_AND => I32AtomicRmwAnd(mem), I64_ATOMIC_RMW_AND => I64AtomicRmwAnd(mem), I32_ATOMIC_RMW_AND8U => I32AtomicRmwAnd8u(mem), I32_ATOMIC_RMW_AND16U => I32AtomicRmwAnd16u(mem), I64_ATOMIC_RMW_AND8U => I64AtomicRmwAnd8u(mem), I64_ATOMIC_RMW_AND16U => I64AtomicRmwAnd16u(mem), I64_ATOMIC_RMW_AND32U => I64AtomicRmwAnd32u(mem), I32_ATOMIC_RMW_OR => I32AtomicRmwOr(mem), I64_ATOMIC_RMW_OR => I64AtomicRmwOr(mem), I32_ATOMIC_RMW_OR8U => I32AtomicRmwOr8u(mem), I32_ATOMIC_RMW_OR16U => I32AtomicRmwOr16u(mem), I64_ATOMIC_RMW_OR8U => I64AtomicRmwOr8u(mem), I64_ATOMIC_RMW_OR16U => I64AtomicRmwOr16u(mem), I64_ATOMIC_RMW_OR32U => I64AtomicRmwOr32u(mem), I32_ATOMIC_RMW_XOR => I32AtomicRmwXor(mem), I64_ATOMIC_RMW_XOR => I64AtomicRmwXor(mem), I32_ATOMIC_RMW_XOR8U => I32AtomicRmwXor8u(mem), I32_ATOMIC_RMW_XOR16U => I32AtomicRmwXor16u(mem), I64_ATOMIC_RMW_XOR8U => I64AtomicRmwXor8u(mem), I64_ATOMIC_RMW_XOR16U => I64AtomicRmwXor16u(mem), I64_ATOMIC_RMW_XOR32U => I64AtomicRmwXor32u(mem), I32_ATOMIC_RMW_XCHG => I32AtomicRmwXchg(mem), I64_ATOMIC_RMW_XCHG => I64AtomicRmwXchg(mem), I32_ATOMIC_RMW_XCHG8U => I32AtomicRmwXchg8u(mem), I32_ATOMIC_RMW_XCHG16U => I32AtomicRmwXchg16u(mem), I64_ATOMIC_RMW_XCHG8U => I64AtomicRmwXchg8u(mem), I64_ATOMIC_RMW_XCHG16U => I64AtomicRmwXchg16u(mem), I64_ATOMIC_RMW_XCHG32U => I64AtomicRmwXchg32u(mem), I32_ATOMIC_RMW_CMPXCHG => I32AtomicRmwCmpxchg(mem), I64_ATOMIC_RMW_CMPXCHG => I64AtomicRmwCmpxchg(mem), I32_ATOMIC_RMW_CMPXCHG8U => I32AtomicRmwCmpxchg8u(mem), I32_ATOMIC_RMW_CMPXCHG16U => I32AtomicRmwCmpxchg16u(mem), I64_ATOMIC_RMW_CMPXCHG8U => I64AtomicRmwCmpxchg8u(mem), I64_ATOMIC_RMW_CMPXCHG16U => I64AtomicRmwCmpxchg16u(mem), I64_ATOMIC_RMW_CMPXCHG32U => I64AtomicRmwCmpxchg32u(mem), _ => return Err(Error::UnknownOpcode(val)), })) } #[cfg(feature = "simd")] fn deserialize_simd<R: io::Read>(reader: &mut R) -> Result<Instruction, Error> { use self::{opcodes::simd::*, SimdInstruction::*}; let val = VarUint32::deserialize(reader)?.into(); Ok(Instruction::Simd(match val { V128_CONST => { let mut buf = [0; 16]; reader.read(&mut buf)?; V128Const(Box::new(buf)) }, V128_LOAD => V128Load(MemArg::deserialize(reader)?), V128_STORE => V128Store(MemArg::deserialize(reader)?), I8X16_SPLAT => I8x16Splat, I16X8_SPLAT => I16x8Splat, I32X4_SPLAT => I32x4Splat, I64X2_SPLAT => I64x2Splat, F32X4_SPLAT => F32x4Splat, F64X2_SPLAT => F64x2Splat, I8X16_EXTRACT_LANE_S => I8x16ExtractLaneS(Uint8::deserialize(reader)?.into()), I8X16_EXTRACT_LANE_U => I8x16ExtractLaneU(Uint8::deserialize(reader)?.into()), I16X8_EXTRACT_LANE_S => I16x8ExtractLaneS(Uint8::deserialize(reader)?.into()), I16X8_EXTRACT_LANE_U => I16x8ExtractLaneU(Uint8::deserialize(reader)?.into()), I32X4_EXTRACT_LANE => I32x4ExtractLane(Uint8::deserialize(reader)?.into()), I64X2_EXTRACT_LANE => I64x2ExtractLane(Uint8::deserialize(reader)?.into()), F32X4_EXTRACT_LANE => F32x4ExtractLane(Uint8::deserialize(reader)?.into()), F64X2_EXTRACT_LANE => F64x2ExtractLane(Uint8::deserialize(reader)?.into()), I8X16_REPLACE_LANE => I8x16ReplaceLane(Uint8::deserialize(reader)?.into()), I16X8_REPLACE_LANE => I16x8ReplaceLane(Uint8::deserialize(reader)?.into()), I32X4_REPLACE_LANE => I32x4ReplaceLane(Uint8::deserialize(reader)?.into()), I64X2_REPLACE_LANE => I64x2ReplaceLane(Uint8::deserialize(reader)?.into()), F32X4_REPLACE_LANE => F32x4ReplaceLane(Uint8::deserialize(reader)?.into()), F64X2_REPLACE_LANE => F64x2ReplaceLane(Uint8::deserialize(reader)?.into()), V8X16_SHUFFLE => { let mut buf = [0; 16]; 
reader.read(&mut buf)?; V8x16Shuffle(Box::new(buf)) }, I8X16_ADD => I8x16Add, I16X8_ADD => I16x8Add, I32X4_ADD => I32x4Add, I64X2_ADD => I64x2Add, I8X16_SUB => I8x16Sub, I16X8_SUB => I16x8Sub, I32X4_SUB => I32x4Sub, I64X2_SUB => I64x2Sub, I8X16_MUL => I8x16Mul, I16X8_MUL => I16x8Mul, I32X4_MUL => I32x4Mul, // I64X2_MUL => I64x2Mul, I8X16_NEG => I8x16Neg, I16X8_NEG => I16x8Neg, I32X4_NEG => I32x4Neg, I64X2_NEG => I64x2Neg, I8X16_ADD_SATURATE_S => I8x16AddSaturateS, I8X16_ADD_SATURATE_U => I8x16AddSaturateU, I16X8_ADD_SATURATE_S => I16x8AddSaturateS, I16X8_ADD_SATURATE_U => I16x8AddSaturateU, I8X16_SUB_SATURATE_S => I8x16SubSaturateS, I8X16_SUB_SATURATE_U => I8x16SubSaturateU, I16X8_SUB_SATURATE_S => I16x8SubSaturateS, I16X8_SUB_SATURATE_U => I16x8SubSaturateU, I8X16_SHL => I8x16Shl, I16X8_SHL => I16x8Shl, I32X4_SHL => I32x4Shl, I64X2_SHL => I64x2Shl, I8X16_SHR_S => I8x16ShrS, I8X16_SHR_U => I8x16ShrU, I16X8_SHR_S => I16x8ShrS, I16X8_SHR_U => I16x8ShrU, I32X4_SHR_S => I32x4ShrS, I32X4_SHR_U => I32x4ShrU, I64X2_SHR_S => I64x2ShrS, I64X2_SHR_U => I64x2ShrU, V128_AND => V128And, V128_OR => V128Or, V128_XOR => V128Xor, V128_NOT => V128Not, V128_BITSELECT => V128Bitselect, I8X16_ANY_TRUE => I8x16AnyTrue, I16X8_ANY_TRUE => I16x8AnyTrue, I32X4_ANY_TRUE => I32x4AnyTrue, I64X2_ANY_TRUE => I64x2AnyTrue, I8X16_ALL_TRUE => I8x16AllTrue, I16X8_ALL_TRUE => I16x8AllTrue, I32X4_ALL_TRUE => I32x4AllTrue, I64X2_ALL_TRUE => I64x2AllTrue, I8X16_EQ => I8x16Eq, I16X8_EQ => I16x8Eq, I32X4_EQ => I32x4Eq, // I64X2_EQ => I64x2Eq, F32X4_EQ => F32x4Eq, F64X2_EQ => F64x2Eq, I8X16_NE => I8x16Ne, I16X8_NE => I16x8Ne, I32X4_NE => I32x4Ne, // I64X2_NE => I64x2Ne, F32X4_NE => F32x4Ne, F64X2_NE => F64x2Ne, I8X16_LT_S => I8x16LtS, I8X16_LT_U => I8x16LtU, I16X8_LT_S => I16x8LtS, I16X8_LT_U => I16x8LtU, I32X4_LT_S => I32x4LtS, I32X4_LT_U => I32x4LtU, // I64X2_LT_S => I64x2LtS, // I64X2_LT_U => I64x2LtU, F32X4_LT => F32x4Lt, F64X2_LT => F64x2Lt, I8X16_LE_S => I8x16LeS, I8X16_LE_U => I8x16LeU, I16X8_LE_S => I16x8LeS, I16X8_LE_U => I16x8LeU, I32X4_LE_S => I32x4LeS, I32X4_LE_U => I32x4LeU, // I64X2_LE_S => I64x2LeS, // I64X2_LE_U => I64x2LeU, F32X4_LE => F32x4Le, F64X2_LE => F64x2Le, I8X16_GT_S => I8x16GtS, I8X16_GT_U => I8x16GtU, I16X8_GT_S => I16x8GtS, I16X8_GT_U => I16x8GtU, I32X4_GT_S => I32x4GtS, I32X4_GT_U => I32x4GtU, // I64X2_GT_S => I64x2GtS, // I64X2_GT_U => I64x2GtU, F32X4_GT => F32x4Gt, F64X2_GT => F64x2Gt, I8X16_GE_S => I8x16GeS, I8X16_GE_U => I8x16GeU, I16X8_GE_S => I16x8GeS, I16X8_GE_U => I16x8GeU, I32X4_GE_S => I32x4GeS, I32X4_GE_U => I32x4GeU, // I64X2_GE_S => I64x2GeS, // I64X2_GE_U => I64x2GeU, F32X4_GE => F32x4Ge, F64X2_GE => F64x2Ge, F32X4_NEG => F32x4Neg, F64X2_NEG => F64x2Neg, F32X4_ABS => F32x4Abs, F64X2_ABS => F64x2Abs, F32X4_MIN => F32x4Min, F64X2_MIN => F64x2Min, F32X4_MAX => F32x4Max, F64X2_MAX => F64x2Max, F32X4_ADD => F32x4Add, F64X2_ADD => F64x2Add, F32X4_SUB => F32x4Sub, F64X2_SUB => F64x2Sub, F32X4_DIV => F32x4Div, F64X2_DIV => F64x2Div, F32X4_MUL => F32x4Mul, F64X2_MUL => F64x2Mul, F32X4_SQRT => F32x4Sqrt, F64X2_SQRT => F64x2Sqrt, F32X4_CONVERT_S_I32X4 => F32x4ConvertSI32x4, F32X4_CONVERT_U_I32X4 => F32x4ConvertUI32x4, F64X2_CONVERT_S_I64X2 => F64x2ConvertSI64x2, F64X2_CONVERT_U_I64X2 => F64x2ConvertUI64x2, I32X4_TRUNC_S_F32X4_SAT => I32x4TruncSF32x4Sat, I32X4_TRUNC_U_F32X4_SAT => I32x4TruncUF32x4Sat, I64X2_TRUNC_S_F64X2_SAT => I64x2TruncSF64x2Sat, I64X2_TRUNC_U_F64X2_SAT => I64x2TruncUF64x2Sat, _ => return Err(Error::UnknownSimdOpcode(val)), })) } #[cfg(feature = "bulk")] fn deserialize_bulk<R: 
io::Read>(reader: &mut R) -> Result<Instruction, Error> { use self::{opcodes::bulk::*, BulkInstruction::*}; let val: u8 = Uint8::deserialize(reader)?.into(); Ok(Instruction::Bulk(match val { MEMORY_INIT => { if u8::from(Uint8::deserialize(reader)?) != 0 { return Err(Error::UnknownOpcode(val)) } MemoryInit(VarUint32::deserialize(reader)?.into()) }, MEMORY_DROP => MemoryDrop(VarUint32::deserialize(reader)?.into()), MEMORY_FILL => { if u8::from(Uint8::deserialize(reader)?) != 0 { return Err(Error::UnknownOpcode(val)) } MemoryFill }, MEMORY_COPY => { if u8::from(Uint8::deserialize(reader)?) != 0 { return Err(Error::UnknownOpcode(val)) } MemoryCopy }, TABLE_INIT => { if u8::from(Uint8::deserialize(reader)?) != 0 { return Err(Error::UnknownOpcode(val)) } TableInit(VarUint32::deserialize(reader)?.into()) }, TABLE_DROP => TableDrop(VarUint32::deserialize(reader)?.into()), TABLE_COPY => { if u8::from(Uint8::deserialize(reader)?) != 0 { return Err(Error::UnknownOpcode(val)) } TableCopy }, _ => return Err(Error::UnknownOpcode(val)), })) } #[cfg(any(feature = "simd", feature = "atomics"))] impl Deserialize for MemArg { type Error = Error; fn deserialize<R: io::Read>(reader: &mut R) -> Result<Self, Self::Error> { let align = Uint8::deserialize(reader)?; let offset = VarUint32::deserialize(reader)?; Ok(MemArg { align: align.into(), offset: offset.into() }) } } macro_rules! op { ($writer: expr, $byte: expr) => {{ let b: u8 = $byte; $writer.write(&[b])?; }}; ($writer: expr, $byte: expr, $s: block) => {{ op!($writer, $byte); $s; }}; } #[cfg(feature = "atomics")] macro_rules! atomic { ($writer: expr, $byte: expr, $mem:expr) => {{ $writer.write(&[ATOMIC_PREFIX, $byte])?; MemArg::serialize($mem, $writer)?; }}; } #[cfg(feature = "simd")] macro_rules! simd { ($writer: expr, $byte: expr, $other:expr) => {{ $writer.write(&[SIMD_PREFIX])?; VarUint32::from($byte).serialize($writer)?; $other; }}; } #[cfg(feature = "bulk")] macro_rules! 
bulk { ($writer: expr, $byte: expr) => {{ $writer.write(&[BULK_PREFIX, $byte])?; }}; ($writer: expr, $byte: expr, $remaining:expr) => {{ bulk!($writer, $byte); $remaining; }}; } impl Serialize for Instruction { type Error = Error; fn serialize<W: io::Write>(self, writer: &mut W) -> Result<(), Self::Error> { use self::{opcodes::*, Instruction::*}; match self { Unreachable => op!(writer, UNREACHABLE), Nop => op!(writer, NOP), Block(block_type) => op!(writer, BLOCK, { block_type.serialize(writer)?; }), Loop(block_type) => op!(writer, LOOP, { block_type.serialize(writer)?; }), If(block_type) => op!(writer, IF, { block_type.serialize(writer)?; }), Else => op!(writer, ELSE), End => op!(writer, END), Br(idx) => op!(writer, BR, { VarUint32::from(idx).serialize(writer)?; }), BrIf(idx) => op!(writer, BRIF, { VarUint32::from(idx).serialize(writer)?; }), BrTable(ref table) => op!(writer, BRTABLE, { let list_writer = CountedListWriter::<VarUint32, _>( table.table.len(), table.table.iter().map(|x| VarUint32::from(*x)), ); list_writer.serialize(writer)?; VarUint32::from(table.default).serialize(writer)?; }), Return => op!(writer, RETURN), Call(index) => op!(writer, CALL, { VarUint32::from(index).serialize(writer)?; }), CallIndirect(index, reserved) => op!(writer, CALLINDIRECT, { VarUint32::from(index).serialize(writer)?; Uint8::from(reserved).serialize(writer)?; }), Drop => op!(writer, DROP), Select => op!(writer, SELECT), GetLocal(index) => op!(writer, GETLOCAL, { VarUint32::from(index).serialize(writer)?; }), SetLocal(index) => op!(writer, SETLOCAL, { VarUint32::from(index).serialize(writer)?; }), TeeLocal(index) => op!(writer, TEELOCAL, { VarUint32::from(index).serialize(writer)?; }), GetGlobal(index) => op!(writer, GETGLOBAL, { VarUint32::from(index).serialize(writer)?; }), SetGlobal(index) => op!(writer, SETGLOBAL, { VarUint32::from(index).serialize(writer)?; }), I32Load(flags, offset) => op!(writer, I32LOAD, { VarUint32::from(flags).serialize(writer)?; VarUint32::from(offset).serialize(writer)?; }), I64Load(flags, offset) => op!(writer, I64LOAD, { VarUint32::from(flags).serialize(writer)?; VarUint32::from(offset).serialize(writer)?; }), F32Load(flags, offset) => op!(writer, F32LOAD, { VarUint32::from(flags).serialize(writer)?; VarUint32::from(offset).serialize(writer)?; }), F64Load(flags, offset) => op!(writer, F64LOAD, { VarUint32::from(flags).serialize(writer)?; VarUint32::from(offset).serialize(writer)?; }), I32Load8S(flags, offset) => op!(writer, I32LOAD8S, { VarUint32::from(flags).serialize(writer)?; VarUint32::from(offset).serialize(writer)?; }), I32Load8U(flags, offset) => op!(writer, I32LOAD8U, { VarUint32::from(flags).serialize(writer)?; VarUint32::from(offset).serialize(writer)?; }), I32Load16S(flags, offset) => op!(writer, I32LOAD16S, { VarUint32::from(flags).serialize(writer)?; VarUint32::from(offset).serialize(writer)?; }), I32Load16U(flags, offset) => op!(writer, I32LOAD16U, { VarUint32::from(flags).serialize(writer)?; VarUint32::from(offset).serialize(writer)?; }), I64Load8S(flags, offset) => op!(writer, I64LOAD8S, { VarUint32::from(flags).serialize(writer)?; VarUint32::from(offset).serialize(writer)?; }), I64Load8U(flags, offset) => op!(writer, I64LOAD8U, { VarUint32::from(flags).serialize(writer)?; VarUint32::from(offset).serialize(writer)?; }), I64Load16S(flags, offset) => op!(writer, I64LOAD16S, { VarUint32::from(flags).serialize(writer)?; VarUint32::from(offset).serialize(writer)?; }), I64Load16U(flags, offset) => op!(writer, I64LOAD16U, { 
VarUint32::from(flags).serialize(writer)?; VarUint32::from(offset).serialize(writer)?; }), I64Load32S(flags, offset) => op!(writer, I64LOAD32S, { VarUint32::from(flags).serialize(writer)?; VarUint32::from(offset).serialize(writer)?; }), I64Load32U(flags, offset) => op!(writer, I64LOAD32U, { VarUint32::from(flags).serialize(writer)?; VarUint32::from(offset).serialize(writer)?; }), I32Store(flags, offset) => op!(writer, I32STORE, { VarUint32::from(flags).serialize(writer)?; VarUint32::from(offset).serialize(writer)?; }), I64Store(flags, offset) => op!(writer, I64STORE, { VarUint32::from(flags).serialize(writer)?; VarUint32::from(offset).serialize(writer)?; }), F32Store(flags, offset) => op!(writer, F32STORE, { VarUint32::from(flags).serialize(writer)?; VarUint32::from(offset).serialize(writer)?; }), F64Store(flags, offset) => op!(writer, F64STORE, { VarUint32::from(flags).serialize(writer)?; VarUint32::from(offset).serialize(writer)?; }), I32Store8(flags, offset) => op!(writer, I32STORE8, { VarUint32::from(flags).serialize(writer)?; VarUint32::from(offset).serialize(writer)?; }), I32Store16(flags, offset) => op!(writer, I32STORE16, { VarUint32::from(flags).serialize(writer)?; VarUint32::from(offset).serialize(writer)?; }), I64Store8(flags, offset) => op!(writer, I64STORE8, { VarUint32::from(flags).serialize(writer)?; VarUint32::from(offset).serialize(writer)?; }), I64Store16(flags, offset) => op!(writer, I64STORE16, { VarUint32::from(flags).serialize(writer)?; VarUint32::from(offset).serialize(writer)?; }), I64Store32(flags, offset) => op!(writer, I64STORE32, { VarUint32::from(flags).serialize(writer)?; VarUint32::from(offset).serialize(writer)?; }), CurrentMemory(flag) => op!(writer, CURRENTMEMORY, { Uint8::from(flag).serialize(writer)?; }), GrowMemory(flag) => op!(writer, GROWMEMORY, { Uint8::from(flag).serialize(writer)?; }), I32Const(def) => op!(writer, I32CONST, { VarInt32::from(def).serialize(writer)?; }), I64Const(def) => op!(writer, I64CONST, { VarInt64::from(def).serialize(writer)?; }), F32Const(def) => op!(writer, F32CONST, { Uint32::from(def).serialize(writer)?; }), F64Const(def) => op!(writer, F64CONST, { Uint64::from(def).serialize(writer)?; }), I32Eqz => op!(writer, I32EQZ), I32Eq => op!(writer, I32EQ), I32Ne => op!(writer, I32NE), I32LtS => op!(writer, I32LTS), I32LtU => op!(writer, I32LTU), I32GtS => op!(writer, I32GTS), I32GtU => op!(writer, I32GTU), I32LeS => op!(writer, I32LES), I32LeU => op!(writer, I32LEU), I32GeS => op!(writer, I32GES), I32GeU => op!(writer, I32GEU), I64Eqz => op!(writer, I64EQZ), I64Eq => op!(writer, I64EQ), I64Ne => op!(writer, I64NE), I64LtS => op!(writer, I64LTS), I64LtU => op!(writer, I64LTU), I64GtS => op!(writer, I64GTS), I64GtU => op!(writer, I64GTU), I64LeS => op!(writer, I64LES), I64LeU => op!(writer, I64LEU), I64GeS => op!(writer, I64GES), I64GeU => op!(writer, I64GEU), F32Eq => op!(writer, F32EQ), F32Ne => op!(writer, F32NE), F32Lt => op!(writer, F32LT), F32Gt => op!(writer, F32GT), F32Le => op!(writer, F32LE), F32Ge => op!(writer, F32GE), F64Eq => op!(writer, F64EQ), F64Ne => op!(writer, F64NE), F64Lt => op!(writer, F64LT), F64Gt => op!(writer, F64GT), F64Le => op!(writer, F64LE), F64Ge => op!(writer, F64GE), I32Clz => op!(writer, I32CLZ), I32Ctz => op!(writer, I32CTZ), I32Popcnt => op!(writer, I32POPCNT), I32Add => op!(writer, I32ADD), I32Sub => op!(writer, I32SUB), I32Mul => op!(writer, I32MUL), I32DivS => op!(writer, I32DIVS), I32DivU => op!(writer, I32DIVU), I32RemS => op!(writer, I32REMS), I32RemU => op!(writer, I32REMU), I32And => 
op!(writer, I32AND), I32Or => op!(writer, I32OR), I32Xor => op!(writer, I32XOR), I32Shl => op!(writer, I32SHL), I32ShrS => op!(writer, I32SHRS), I32ShrU => op!(writer, I32SHRU), I32Rotl => op!(writer, I32ROTL), I32Rotr => op!(writer, I32ROTR), I64Clz => op!(writer, I64CLZ), I64Ctz => op!(writer, I64CTZ), I64Popcnt => op!(writer, I64POPCNT), I64Add => op!(writer, I64ADD), I64Sub => op!(writer, I64SUB), I64Mul => op!(writer, I64MUL), I64DivS => op!(writer, I64DIVS), I64DivU => op!(writer, I64DIVU), I64RemS => op!(writer, I64REMS), I64RemU => op!(writer, I64REMU), I64And => op!(writer, I64AND), I64Or => op!(writer, I64OR), I64Xor => op!(writer, I64XOR), I64Shl => op!(writer, I64SHL), I64ShrS => op!(writer, I64SHRS), I64ShrU => op!(writer, I64SHRU), I64Rotl => op!(writer, I64ROTL), I64Rotr => op!(writer, I64ROTR), F32Abs => op!(writer, F32ABS), F32Neg => op!(writer, F32NEG), F32Ceil => op!(writer, F32CEIL), F32Floor => op!(writer, F32FLOOR), F32Trunc => op!(writer, F32TRUNC), F32Nearest => op!(writer, F32NEAREST), F32Sqrt => op!(writer, F32SQRT), F32Add => op!(writer, F32ADD), F32Sub => op!(writer, F32SUB), F32Mul => op!(writer, F32MUL), F32Div => op!(writer, F32DIV), F32Min => op!(writer, F32MIN), F32Max => op!(writer, F32MAX), F32Copysign => op!(writer, F32COPYSIGN), F64Abs => op!(writer, F64ABS), F64Neg => op!(writer, F64NEG), F64Ceil => op!(writer, F64CEIL), F64Floor => op!(writer, F64FLOOR), F64Trunc => op!(writer, F64TRUNC), F64Nearest => op!(writer, F64NEAREST), F64Sqrt => op!(writer, F64SQRT), F64Add => op!(writer, F64ADD), F64Sub => op!(writer, F64SUB), F64Mul => op!(writer, F64MUL), F64Div => op!(writer, F64DIV), F64Min => op!(writer, F64MIN), F64Max => op!(writer, F64MAX), F64Copysign => op!(writer, F64COPYSIGN), I32WrapI64 => op!(writer, I32WRAPI64), I32TruncSF32 => op!(writer, I32TRUNCSF32), I32TruncUF32 => op!(writer, I32TRUNCUF32), I32TruncSF64 => op!(writer, I32TRUNCSF64), I32TruncUF64 => op!(writer, I32TRUNCUF64), I64ExtendSI32 => op!(writer, I64EXTENDSI32), I64ExtendUI32 => op!(writer, I64EXTENDUI32), I64TruncSF32 => op!(writer, I64TRUNCSF32), I64TruncUF32 => op!(writer, I64TRUNCUF32), I64TruncSF64 => op!(writer, I64TRUNCSF64), I64TruncUF64 => op!(writer, I64TRUNCUF64), F32ConvertSI32 => op!(writer, F32CONVERTSI32), F32ConvertUI32 => op!(writer, F32CONVERTUI32), F32ConvertSI64 => op!(writer, F32CONVERTSI64), F32ConvertUI64 => op!(writer, F32CONVERTUI64), F32DemoteF64 => op!(writer, F32DEMOTEF64), F64ConvertSI32 => op!(writer, F64CONVERTSI32), F64ConvertUI32 => op!(writer, F64CONVERTUI32), F64ConvertSI64 => op!(writer, F64CONVERTSI64), F64ConvertUI64 => op!(writer, F64CONVERTUI64), F64PromoteF32 => op!(writer, F64PROMOTEF32), I32ReinterpretF32 => op!(writer, I32REINTERPRETF32), I64ReinterpretF64 => op!(writer, I64REINTERPRETF64), F32ReinterpretI32 => op!(writer, F32REINTERPRETI32), F64ReinterpretI64 => op!(writer, F64REINTERPRETI64), #[cfg(feature = "sign_ext")] SignExt(ref a) => match *a { SignExtInstruction::I32Extend8S => op!(writer, sign_ext::I32_EXTEND8_S), SignExtInstruction::I32Extend16S => op!(writer, sign_ext::I32_EXTEND16_S), SignExtInstruction::I64Extend8S => op!(writer, sign_ext::I64_EXTEND8_S), SignExtInstruction::I64Extend16S => op!(writer, sign_ext::I64_EXTEND16_S), SignExtInstruction::I64Extend32S => op!(writer, sign_ext::I64_EXTEND32_S), }, #[cfg(feature = "atomics")] Atomics(a) => return a.serialize(writer), #[cfg(feature = "simd")] Simd(a) => return a.serialize(writer), #[cfg(feature = "bulk")] Bulk(a) => return a.serialize(writer), #[cfg(feature = 
"tail_calls")] ReturnCall(index) => op!(writer, RETURNCALL, { VarUint32::from(index).serialize(writer)?; }), #[cfg(feature = "tail_calls")] ReturnCallIndirect(index, reserved) => op!(writer, RETURNCALLINDIRECT, { VarUint32::from(index).serialize(writer)?; Uint8::from(reserved).serialize(writer)?; }), } Ok(()) } } #[cfg(feature = "atomics")] impl Serialize for AtomicsInstruction { type Error = Error; fn serialize<W: io::Write>(self, writer: &mut W) -> Result<(), Self::Error> { use self::{opcodes::atomics::*, AtomicsInstruction::*}; match self { AtomicWake(m) => atomic!(writer, ATOMIC_WAKE, m), I32AtomicWait(m) => atomic!(writer, I32_ATOMIC_WAIT, m), I64AtomicWait(m) => atomic!(writer, I64_ATOMIC_WAIT, m), I32AtomicLoad(m) => atomic!(writer, I32_ATOMIC_LOAD, m), I64AtomicLoad(m) => atomic!(writer, I64_ATOMIC_LOAD, m), I32AtomicLoad8u(m) => atomic!(writer, I32_ATOMIC_LOAD8U, m), I32AtomicLoad16u(m) => atomic!(writer, I32_ATOMIC_LOAD16U, m), I64AtomicLoad8u(m) => atomic!(writer, I64_ATOMIC_LOAD8U, m), I64AtomicLoad16u(m) => atomic!(writer, I64_ATOMIC_LOAD16U, m), I64AtomicLoad32u(m) => atomic!(writer, I64_ATOMIC_LOAD32U, m), I32AtomicStore(m) => atomic!(writer, I32_ATOMIC_STORE, m), I64AtomicStore(m) => atomic!(writer, I64_ATOMIC_STORE, m), I32AtomicStore8u(m) => atomic!(writer, I32_ATOMIC_STORE8U, m), I32AtomicStore16u(m) => atomic!(writer, I32_ATOMIC_STORE16U, m), I64AtomicStore8u(m) => atomic!(writer, I64_ATOMIC_STORE8U, m), I64AtomicStore16u(m) => atomic!(writer, I64_ATOMIC_STORE16U, m), I64AtomicStore32u(m) => atomic!(writer, I64_ATOMIC_STORE32U, m), I32AtomicRmwAdd(m) => atomic!(writer, I32_ATOMIC_RMW_ADD, m), I64AtomicRmwAdd(m) => atomic!(writer, I64_ATOMIC_RMW_ADD, m), I32AtomicRmwAdd8u(m) => atomic!(writer, I32_ATOMIC_RMW_ADD8U, m), I32AtomicRmwAdd16u(m) => atomic!(writer, I32_ATOMIC_RMW_ADD16U, m), I64AtomicRmwAdd8u(m) => atomic!(writer, I64_ATOMIC_RMW_ADD8U, m), I64AtomicRmwAdd16u(m) => atomic!(writer, I64_ATOMIC_RMW_ADD16U, m), I64AtomicRmwAdd32u(m) => atomic!(writer, I64_ATOMIC_RMW_ADD32U, m), I32AtomicRmwSub(m) => atomic!(writer, I32_ATOMIC_RMW_SUB, m), I64AtomicRmwSub(m) => atomic!(writer, I64_ATOMIC_RMW_SUB, m), I32AtomicRmwSub8u(m) => atomic!(writer, I32_ATOMIC_RMW_SUB8U, m), I32AtomicRmwSub16u(m) => atomic!(writer, I32_ATOMIC_RMW_SUB16U, m), I64AtomicRmwSub8u(m) => atomic!(writer, I64_ATOMIC_RMW_SUB8U, m), I64AtomicRmwSub16u(m) => atomic!(writer, I64_ATOMIC_RMW_SUB16U, m), I64AtomicRmwSub32u(m) => atomic!(writer, I64_ATOMIC_RMW_SUB32U, m), I32AtomicRmwAnd(m) => atomic!(writer, I32_ATOMIC_RMW_AND, m), I64AtomicRmwAnd(m) => atomic!(writer, I64_ATOMIC_RMW_AND, m), I32AtomicRmwAnd8u(m) => atomic!(writer, I32_ATOMIC_RMW_AND8U, m), I32AtomicRmwAnd16u(m) => atomic!(writer, I32_ATOMIC_RMW_AND16U, m), I64AtomicRmwAnd8u(m) => atomic!(writer, I64_ATOMIC_RMW_AND8U, m), I64AtomicRmwAnd16u(m) => atomic!(writer, I64_ATOMIC_RMW_AND16U, m), I64AtomicRmwAnd32u(m) => atomic!(writer, I64_ATOMIC_RMW_AND32U, m), I32AtomicRmwOr(m) => atomic!(writer, I32_ATOMIC_RMW_OR, m), I64AtomicRmwOr(m) => atomic!(writer, I64_ATOMIC_RMW_OR, m), I32AtomicRmwOr8u(m) => atomic!(writer, I32_ATOMIC_RMW_OR8U, m), I32AtomicRmwOr16u(m) => atomic!(writer, I32_ATOMIC_RMW_OR16U, m), I64AtomicRmwOr8u(m) => atomic!(writer, I64_ATOMIC_RMW_OR8U, m), I64AtomicRmwOr16u(m) => atomic!(writer, I64_ATOMIC_RMW_OR16U, m), I64AtomicRmwOr32u(m) => atomic!(writer, I64_ATOMIC_RMW_OR32U, m), I32AtomicRmwXor(m) => atomic!(writer, I32_ATOMIC_RMW_XOR, m), I64AtomicRmwXor(m) => atomic!(writer, I64_ATOMIC_RMW_XOR, m), I32AtomicRmwXor8u(m) => 
atomic!(writer, I32_ATOMIC_RMW_XOR8U, m), I32AtomicRmwXor16u(m) => atomic!(writer, I32_ATOMIC_RMW_XOR16U, m), I64AtomicRmwXor8u(m) => atomic!(writer, I64_ATOMIC_RMW_XOR8U, m), I64AtomicRmwXor16u(m) => atomic!(writer, I64_ATOMIC_RMW_XOR16U, m), I64AtomicRmwXor32u(m) => atomic!(writer, I64_ATOMIC_RMW_XOR32U, m), I32AtomicRmwXchg(m) => atomic!(writer, I32_ATOMIC_RMW_XCHG, m), I64AtomicRmwXchg(m) => atomic!(writer, I64_ATOMIC_RMW_XCHG, m), I32AtomicRmwXchg8u(m) => atomic!(writer, I32_ATOMIC_RMW_XCHG8U, m), I32AtomicRmwXchg16u(m) => atomic!(writer, I32_ATOMIC_RMW_XCHG16U, m), I64AtomicRmwXchg8u(m) => atomic!(writer, I64_ATOMIC_RMW_XCHG8U, m), I64AtomicRmwXchg16u(m) => atomic!(writer, I64_ATOMIC_RMW_XCHG16U, m), I64AtomicRmwXchg32u(m) => atomic!(writer, I64_ATOMIC_RMW_XCHG32U, m), I32AtomicRmwCmpxchg(m) => atomic!(writer, I32_ATOMIC_RMW_CMPXCHG, m), I64AtomicRmwCmpxchg(m) => atomic!(writer, I64_ATOMIC_RMW_CMPXCHG, m), I32AtomicRmwCmpxchg8u(m) => atomic!(writer, I32_ATOMIC_RMW_CMPXCHG8U, m), I32AtomicRmwCmpxchg16u(m) => atomic!(writer, I32_ATOMIC_RMW_CMPXCHG16U, m), I64AtomicRmwCmpxchg8u(m) => atomic!(writer, I64_ATOMIC_RMW_CMPXCHG8U, m), I64AtomicRmwCmpxchg16u(m) => atomic!(writer, I64_ATOMIC_RMW_CMPXCHG16U, m), I64AtomicRmwCmpxchg32u(m) => atomic!(writer, I64_ATOMIC_RMW_CMPXCHG32U, m), } Ok(()) } } #[cfg(feature = "simd")] impl Serialize for SimdInstruction { type Error = Error; fn serialize<W: io::Write>(self, writer: &mut W) -> Result<(), Self::Error> { use self::{opcodes::simd::*, SimdInstruction::*}; match self { V128Const(ref c) => simd!(writer, V128_CONST, writer.write(&c[..])?), V128Load(m) => simd!(writer, V128_LOAD, MemArg::serialize(m, writer)?), V128Store(m) => simd!(writer, V128_STORE, MemArg::serialize(m, writer)?), I8x16Splat => simd!(writer, I8X16_SPLAT, {}), I16x8Splat => simd!(writer, I16X8_SPLAT, {}), I32x4Splat => simd!(writer, I32X4_SPLAT, {}), I64x2Splat => simd!(writer, I64X2_SPLAT, {}), F32x4Splat => simd!(writer, F32X4_SPLAT, {}), F64x2Splat => simd!(writer, F64X2_SPLAT, {}), I8x16ExtractLaneS(i) => simd!(writer, I8X16_EXTRACT_LANE_S, writer.write(&[i])?), I8x16ExtractLaneU(i) => simd!(writer, I8X16_EXTRACT_LANE_U, writer.write(&[i])?), I16x8ExtractLaneS(i) => simd!(writer, I16X8_EXTRACT_LANE_S, writer.write(&[i])?), I16x8ExtractLaneU(i) => simd!(writer, I16X8_EXTRACT_LANE_U, writer.write(&[i])?), I32x4ExtractLane(i) => simd!(writer, I32X4_EXTRACT_LANE, writer.write(&[i])?), I64x2ExtractLane(i) => simd!(writer, I64X2_EXTRACT_LANE, writer.write(&[i])?), F32x4ExtractLane(i) => simd!(writer, F32X4_EXTRACT_LANE, writer.write(&[i])?), F64x2ExtractLane(i) => simd!(writer, F64X2_EXTRACT_LANE, writer.write(&[i])?), I8x16ReplaceLane(i) => simd!(writer, I8X16_REPLACE_LANE, writer.write(&[i])?), I16x8ReplaceLane(i) => simd!(writer, I16X8_REPLACE_LANE, writer.write(&[i])?), I32x4ReplaceLane(i) => simd!(writer, I32X4_REPLACE_LANE, writer.write(&[i])?), I64x2ReplaceLane(i) => simd!(writer, I64X2_REPLACE_LANE, writer.write(&[i])?), F32x4ReplaceLane(i) => simd!(writer, F32X4_REPLACE_LANE, writer.write(&[i])?), F64x2ReplaceLane(i) => simd!(writer, F64X2_REPLACE_LANE, writer.write(&[i])?), V8x16Shuffle(ref i) => simd!(writer, V8X16_SHUFFLE, writer.write(&i[..])?), I8x16Add => simd!(writer, I8X16_ADD, {}), I16x8Add => simd!(writer, I16X8_ADD, {}), I32x4Add => simd!(writer, I32X4_ADD, {}), I64x2Add => simd!(writer, I64X2_ADD, {}), I8x16Sub => simd!(writer, I8X16_SUB, {}), I16x8Sub => simd!(writer, I16X8_SUB, {}), I32x4Sub => simd!(writer, I32X4_SUB, {}), I64x2Sub => simd!(writer, I64X2_SUB, 
{}), I8x16Mul => simd!(writer, I8X16_MUL, {}), I16x8Mul => simd!(writer, I16X8_MUL, {}), I32x4Mul => simd!(writer, I32X4_MUL, {}), // I64x2Mul => simd!(writer, I64X2_MUL, {}), I8x16Neg => simd!(writer, I8X16_NEG, {}), I16x8Neg => simd!(writer, I16X8_NEG, {}), I32x4Neg => simd!(writer, I32X4_NEG, {}), I64x2Neg => simd!(writer, I64X2_NEG, {}), I8x16AddSaturateS => simd!(writer, I8X16_ADD_SATURATE_S, {}), I8x16AddSaturateU => simd!(writer, I8X16_ADD_SATURATE_U, {}), I16x8AddSaturateS => simd!(writer, I16X8_ADD_SATURATE_S, {}), I16x8AddSaturateU => simd!(writer, I16X8_ADD_SATURATE_U, {}), I8x16SubSaturateS => simd!(writer, I8X16_SUB_SATURATE_S, {}), I8x16SubSaturateU => simd!(writer, I8X16_SUB_SATURATE_U, {}), I16x8SubSaturateS => simd!(writer, I16X8_SUB_SATURATE_S, {}), I16x8SubSaturateU => simd!(writer, I16X8_SUB_SATURATE_U, {}), I8x16Shl => simd!(writer, I8X16_SHL, {}), I16x8Shl => simd!(writer, I16X8_SHL, {}), I32x4Shl => simd!(writer, I32X4_SHL, {}), I64x2Shl => simd!(writer, I64X2_SHL, {}), I8x16ShrS => simd!(writer, I8X16_SHR_S, {}), I8x16ShrU => simd!(writer, I8X16_SHR_U, {}), I16x8ShrS => simd!(writer, I16X8_SHR_S, {}), I16x8ShrU => simd!(writer, I16X8_SHR_U, {}), I32x4ShrU => simd!(writer, I32X4_SHR_U, {}), I32x4ShrS => simd!(writer, I32X4_SHR_S, {}), I64x2ShrU => simd!(writer, I64X2_SHR_U, {}), I64x2ShrS => simd!(writer, I64X2_SHR_S, {}), V128And => simd!(writer, V128_AND, {}), V128Or => simd!(writer, V128_OR, {}), V128Xor => simd!(writer, V128_XOR, {}), V128Not => simd!(writer, V128_NOT, {}), V128Bitselect => simd!(writer, V128_BITSELECT, {}), I8x16AnyTrue => simd!(writer, I8X16_ANY_TRUE, {}), I16x8AnyTrue => simd!(writer, I16X8_ANY_TRUE, {}), I32x4AnyTrue => simd!(writer, I32X4_ANY_TRUE, {}), I64x2AnyTrue => simd!(writer, I64X2_ANY_TRUE, {}), I8x16AllTrue => simd!(writer, I8X16_ALL_TRUE, {}), I16x8AllTrue => simd!(writer, I16X8_ALL_TRUE, {}), I32x4AllTrue => simd!(writer, I32X4_ALL_TRUE, {}), I64x2AllTrue => simd!(writer, I64X2_ALL_TRUE, {}), I8x16Eq => simd!(writer, I8X16_EQ, {}), I16x8Eq => simd!(writer, I16X8_EQ, {}), I32x4Eq => simd!(writer, I32X4_EQ, {}), // I64x2Eq => simd!(writer, I64X2_EQ, {}), F32x4Eq => simd!(writer, F32X4_EQ, {}), F64x2Eq => simd!(writer, F64X2_EQ, {}), I8x16Ne => simd!(writer, I8X16_NE, {}), I16x8Ne => simd!(writer, I16X8_NE, {}), I32x4Ne => simd!(writer, I32X4_NE, {}), // I64x2Ne => simd!(writer, I64X2_NE, {}), F32x4Ne => simd!(writer, F32X4_NE, {}), F64x2Ne => simd!(writer, F64X2_NE, {}), I8x16LtS => simd!(writer, I8X16_LT_S, {}), I8x16LtU => simd!(writer, I8X16_LT_U, {}), I16x8LtS => simd!(writer, I16X8_LT_S, {}), I16x8LtU => simd!(writer, I16X8_LT_U, {}), I32x4LtS => simd!(writer, I32X4_LT_S, {}), I32x4LtU => simd!(writer, I32X4_LT_U, {}), // I64x2LtS => simd!(writer, I64X2_LT_S, {}), // I64x2LtU => simd!(writer, I64X2_LT_U, {}), F32x4Lt => simd!(writer, F32X4_LT, {}), F64x2Lt => simd!(writer, F64X2_LT, {}), I8x16LeS => simd!(writer, I8X16_LE_S, {}), I8x16LeU => simd!(writer, I8X16_LE_U, {}), I16x8LeS => simd!(writer, I16X8_LE_S, {}), I16x8LeU => simd!(writer, I16X8_LE_U, {}), I32x4LeS => simd!(writer, I32X4_LE_S, {}), I32x4LeU => simd!(writer, I32X4_LE_U, {}), // I64x2LeS => simd!(writer, I64X2_LE_S, {}), // I64x2LeU => simd!(writer, I64X2_LE_U, {}), F32x4Le => simd!(writer, F32X4_LE, {}), F64x2Le => simd!(writer, F64X2_LE, {}), I8x16GtS => simd!(writer, I8X16_GT_S, {}), I8x16GtU => simd!(writer, I8X16_GT_U, {}), I16x8GtS => simd!(writer, I16X8_GT_S, {}), I16x8GtU => simd!(writer, I16X8_GT_U, {}), I32x4GtS => simd!(writer, I32X4_GT_S, {}), 
I32x4GtU => simd!(writer, I32X4_GT_U, {}), // I64x2GtS => simd!(writer, I64X2_GT_S, {}), // I64x2GtU => simd!(writer, I64X2_GT_U, {}), F32x4Gt => simd!(writer, F32X4_GT, {}), F64x2Gt => simd!(writer, F64X2_GT, {}), I8x16GeS => simd!(writer, I8X16_GE_S, {}), I8x16GeU => simd!(writer, I8X16_GE_U, {}), I16x8GeS => simd!(writer, I16X8_GE_S, {}), I16x8GeU => simd!(writer, I16X8_GE_U, {}), I32x4GeS => simd!(writer, I32X4_GE_S, {}), I32x4GeU => simd!(writer, I32X4_GE_U, {}), // I64x2GeS => simd!(writer, I64X2_GE_S, {}), // I64x2GeU => simd!(writer, I64X2_GE_U, {}), F32x4Ge => simd!(writer, F32X4_GE, {}), F64x2Ge => simd!(writer, F64X2_GE, {}), F32x4Neg => simd!(writer, F32X4_NEG, {}), F64x2Neg => simd!(writer, F64X2_NEG, {}), F32x4Abs => simd!(writer, F32X4_ABS, {}), F64x2Abs => simd!(writer, F64X2_ABS, {}), F32x4Min => simd!(writer, F32X4_MIN, {}), F64x2Min => simd!(writer, F64X2_MIN, {}), F32x4Max => simd!(writer, F32X4_MAX, {}), F64x2Max => simd!(writer, F64X2_MAX, {}), F32x4Add => simd!(writer, F32X4_ADD, {}), F64x2Add => simd!(writer, F64X2_ADD, {}), F32x4Sub => simd!(writer, F32X4_SUB, {}), F64x2Sub => simd!(writer, F64X2_SUB, {}), F32x4Div => simd!(writer, F32X4_DIV, {}), F64x2Div => simd!(writer, F64X2_DIV, {}), F32x4Mul => simd!(writer, F32X4_MUL, {}), F64x2Mul => simd!(writer, F64X2_MUL, {}), F32x4Sqrt => simd!(writer, F32X4_SQRT, {}), F64x2Sqrt => simd!(writer, F64X2_SQRT, {}), F32x4ConvertSI32x4 => simd!(writer, F32X4_CONVERT_S_I32X4, {}), F32x4ConvertUI32x4 => simd!(writer, F32X4_CONVERT_U_I32X4, {}), F64x2ConvertSI64x2 => simd!(writer, F64X2_CONVERT_S_I64X2, {}), F64x2ConvertUI64x2 => simd!(writer, F64X2_CONVERT_U_I64X2, {}), I32x4TruncSF32x4Sat => simd!(writer, I32X4_TRUNC_S_F32X4_SAT, {}), I32x4TruncUF32x4Sat => simd!(writer, I32X4_TRUNC_U_F32X4_SAT, {}), I64x2TruncSF64x2Sat => simd!(writer, I64X2_TRUNC_S_F64X2_SAT, {}), I64x2TruncUF64x2Sat => simd!(writer, I64X2_TRUNC_U_F64X2_SAT, {}), } Ok(()) } } #[cfg(feature = "bulk")] impl Serialize for BulkInstruction { type Error = Error; fn serialize<W: io::Write>(self, writer: &mut W) -> Result<(), Self::Error> { use self::{opcodes::bulk::*, BulkInstruction::*}; match self { MemoryInit(seg) => bulk!(writer, MEMORY_INIT, { Uint8::from(0).serialize(writer)?; VarUint32::from(seg).serialize(writer)?; }), MemoryDrop(seg) => bulk!(writer, MEMORY_DROP, VarUint32::from(seg).serialize(writer)?), MemoryFill => bulk!(writer, MEMORY_FILL, Uint8::from(0).serialize(writer)?), MemoryCopy => bulk!(writer, MEMORY_COPY, Uint8::from(0).serialize(writer)?), TableInit(seg) => bulk!(writer, TABLE_INIT, { Uint8::from(0).serialize(writer)?; VarUint32::from(seg).serialize(writer)?; }), TableDrop(seg) => bulk!(writer, TABLE_DROP, VarUint32::from(seg).serialize(writer)?), TableCopy => bulk!(writer, TABLE_COPY, Uint8::from(0).serialize(writer)?), } Ok(()) } } #[cfg(any(feature = "simd", feature = "atomics"))] impl Serialize for MemArg { type Error = Error; fn serialize<W: io::Write>(self, writer: &mut W) -> Result<(), Self::Error> { Uint8::from(self.align).serialize(writer)?; VarUint32::from(self.offset).serialize(writer)?; Ok(()) } } macro_rules! 
fmt_op { ($f: expr, $mnemonic: expr) => {{ write!($f, "{}", $mnemonic) }}; ($f: expr, $mnemonic: expr, $immediate: expr) => {{ write!($f, "{} {}", $mnemonic, $immediate) }}; ($f: expr, $mnemonic: expr, $immediate1: expr, $immediate2: expr) => {{ write!($f, "{} {} {}", $mnemonic, $immediate1, $immediate2) }}; } impl fmt::Display for Instruction { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use self::Instruction::*; match *self { Unreachable => fmt_op!(f, "unreachable"), Nop => fmt_op!(f, "nop"), Block(BlockType::NoResult) => fmt_op!(f, "block"), Block(BlockType::Value(value_type)) => fmt_op!(f, "block", value_type), #[cfg(feature = "multi_value")] Block(BlockType::TypeIndex(idx)) => write!(f, "block type_idx={}", idx), Loop(BlockType::NoResult) => fmt_op!(f, "loop"), Loop(BlockType::Value(value_type)) => fmt_op!(f, "loop", value_type), #[cfg(feature = "multi_value")] Loop(BlockType::TypeIndex(idx)) => write!(f, "loop type_idx={}", idx), If(BlockType::NoResult) => fmt_op!(f, "if"), If(BlockType::Value(value_type)) => fmt_op!(f, "if", value_type), #[cfg(feature = "multi_value")] If(BlockType::TypeIndex(idx)) => write!(f, "if type_idx={}", idx), Else => fmt_op!(f, "else"), End => fmt_op!(f, "end"), Br(idx) => fmt_op!(f, "br", idx), BrIf(idx) => fmt_op!(f, "br_if", idx), BrTable(ref table) => fmt_op!(f, "br_table", table.default), Return => fmt_op!(f, "return"), Call(index) => fmt_op!(f, "call", index), CallIndirect(index, _) => fmt_op!(f, "call_indirect", index), Drop => fmt_op!(f, "drop"), Select => fmt_op!(f, "select"), GetLocal(index) => fmt_op!(f, "get_local", index), SetLocal(index) => fmt_op!(f, "set_local", index), TeeLocal(index) => fmt_op!(f, "tee_local", index), GetGlobal(index) => fmt_op!(f, "get_global", index), SetGlobal(index) => fmt_op!(f, "set_global", index), I32Load(_, 0) => write!(f, "i32.load"), I32Load(_, offset) => write!(f, "i32.load offset={}", offset), I64Load(_, 0) => write!(f, "i64.load"), I64Load(_, offset) => write!(f, "i64.load offset={}", offset), F32Load(_, 0) => write!(f, "f32.load"), F32Load(_, offset) => write!(f, "f32.load offset={}", offset), F64Load(_, 0) => write!(f, "f64.load"), F64Load(_, offset) => write!(f, "f64.load offset={}", offset), I32Load8S(_, 0) => write!(f, "i32.load8_s"), I32Load8S(_, offset) => write!(f, "i32.load8_s offset={}", offset), I32Load8U(_, 0) => write!(f, "i32.load8_u"), I32Load8U(_, offset) => write!(f, "i32.load8_u offset={}", offset), I32Load16S(_, 0) => write!(f, "i32.load16_s"), I32Load16S(_, offset) => write!(f, "i32.load16_s offset={}", offset), I32Load16U(_, 0) => write!(f, "i32.load16_u"), I32Load16U(_, offset) => write!(f, "i32.load16_u offset={}", offset), I64Load8S(_, 0) => write!(f, "i64.load8_s"), I64Load8S(_, offset) => write!(f, "i64.load8_s offset={}", offset), I64Load8U(_, 0) => write!(f, "i64.load8_u"), I64Load8U(_, offset) => write!(f, "i64.load8_u offset={}", offset), I64Load16S(_, 0) => write!(f, "i64.load16_s"), I64Load16S(_, offset) => write!(f, "i64.load16_s offset={}", offset), I64Load16U(_, 0) => write!(f, "i64.load16_u"), I64Load16U(_, offset) => write!(f, "i64.load16_u offset={}", offset), I64Load32S(_, 0) => write!(f, "i64.load32_s"), I64Load32S(_, offset) => write!(f, "i64.load32_s offset={}", offset), I64Load32U(_, 0) => write!(f, "i64.load32_u"), I64Load32U(_, offset) => write!(f, "i64.load32_u offset={}", offset), I32Store(_, 0) => write!(f, "i32.store"), I32Store(_, offset) => write!(f, "i32.store offset={}", offset), I64Store(_, 0) => write!(f, "i64.store"), I64Store(_, offset) => 
write!(f, "i64.store offset={}", offset), F32Store(_, 0) => write!(f, "f32.store"), F32Store(_, offset) => write!(f, "f32.store offset={}", offset), F64Store(_, 0) => write!(f, "f64.store"), F64Store(_, offset) => write!(f, "f64.store offset={}", offset), I32Store8(_, 0) => write!(f, "i32.store8"), I32Store8(_, offset) => write!(f, "i32.store8 offset={}", offset), I32Store16(_, 0) => write!(f, "i32.store16"), I32Store16(_, offset) => write!(f, "i32.store16 offset={}", offset), I64Store8(_, 0) => write!(f, "i64.store8"), I64Store8(_, offset) => write!(f, "i64.store8 offset={}", offset), I64Store16(_, 0) => write!(f, "i64.store16"), I64Store16(_, offset) => write!(f, "i64.store16 offset={}", offset), I64Store32(_, 0) => write!(f, "i64.store32"), I64Store32(_, offset) => write!(f, "i64.store32 offset={}", offset), CurrentMemory(_) => fmt_op!(f, "current_memory"), GrowMemory(_) => fmt_op!(f, "grow_memory"), I32Const(def) => fmt_op!(f, "i32.const", def), I64Const(def) => fmt_op!(f, "i64.const", def), F32Const(def) => fmt_op!(f, "f32.const", def), F64Const(def) => fmt_op!(f, "f64.const", def), I32Eq => write!(f, "i32.eq"), I32Eqz => write!(f, "i32.eqz"), I32Ne => write!(f, "i32.ne"), I32LtS => write!(f, "i32.lt_s"), I32LtU => write!(f, "i32.lt_u"), I32GtS => write!(f, "i32.gt_s"), I32GtU => write!(f, "i32.gt_u"), I32LeS => write!(f, "i32.le_s"), I32LeU => write!(f, "i32.le_u"), I32GeS => write!(f, "i32.ge_s"), I32GeU => write!(f, "i32.ge_u"), I64Eq => write!(f, "i64.eq"), I64Eqz => write!(f, "i64.eqz"), I64Ne => write!(f, "i64.ne"), I64LtS => write!(f, "i64.lt_s"), I64LtU => write!(f, "i64.lt_u"), I64GtS => write!(f, "i64.gt_s"), I64GtU => write!(f, "i64.gt_u"), I64LeS => write!(f, "i64.le_s"), I64LeU => write!(f, "i64.le_u"), I64GeS => write!(f, "i64.ge_s"), I64GeU => write!(f, "i64.ge_u"), F32Eq => write!(f, "f32.eq"), F32Ne => write!(f, "f32.ne"), F32Lt => write!(f, "f32.lt"), F32Gt => write!(f, "f32.gt"), F32Le => write!(f, "f32.le"), F32Ge => write!(f, "f32.ge"), F64Eq => write!(f, "f64.eq"), F64Ne => write!(f, "f64.ne"), F64Lt => write!(f, "f64.lt"), F64Gt => write!(f, "f64.gt"), F64Le => write!(f, "f64.le"), F64Ge => write!(f, "f64.ge"), I32Clz => write!(f, "i32.clz"), I32Ctz => write!(f, "i32.ctz"), I32Popcnt => write!(f, "i32.popcnt"), I32Add => write!(f, "i32.add"), I32Sub => write!(f, "i32.sub"), I32Mul => write!(f, "i32.mul"), I32DivS => write!(f, "i32.div_s"), I32DivU => write!(f, "i32.div_u"), I32RemS => write!(f, "i32.rem_s"), I32RemU => write!(f, "i32.rem_u"), I32And => write!(f, "i32.and"), I32Or => write!(f, "i32.or"), I32Xor => write!(f, "i32.xor"), I32Shl => write!(f, "i32.shl"), I32ShrS => write!(f, "i32.shr_s"), I32ShrU => write!(f, "i32.shr_u"), I32Rotl => write!(f, "i32.rotl"), I32Rotr => write!(f, "i32.rotr"), I64Clz => write!(f, "i64.clz"), I64Ctz => write!(f, "i64.ctz"), I64Popcnt => write!(f, "i64.popcnt"), I64Add => write!(f, "i64.add"), I64Sub => write!(f, "i64.sub"), I64Mul => write!(f, "i64.mul"), I64DivS => write!(f, "i64.div_s"), I64DivU => write!(f, "i64.div_u"), I64RemS => write!(f, "i64.rem_s"), I64RemU => write!(f, "i64.rem_u"), I64And => write!(f, "i64.and"), I64Or => write!(f, "i64.or"), I64Xor => write!(f, "i64.xor"), I64Shl => write!(f, "i64.shl"), I64ShrS => write!(f, "i64.shr_s"), I64ShrU => write!(f, "i64.shr_u"), I64Rotl => write!(f, "i64.rotl"), I64Rotr => write!(f, "i64.rotr"), F32Abs => write!(f, "f32.abs"), F32Neg => write!(f, "f32.neg"), F32Ceil => write!(f, "f32.ceil"), F32Floor => write!(f, "f32.floor"), F32Trunc => write!(f, "f32.trunc"), 
F32Nearest => write!(f, "f32.nearest"), F32Sqrt => write!(f, "f32.sqrt"), F32Add => write!(f, "f32.add"), F32Sub => write!(f, "f32.sub"), F32Mul => write!(f, "f32.mul"), F32Div => write!(f, "f32.div"), F32Min => write!(f, "f32.min"), F32Max => write!(f, "f32.max"), F32Copysign => write!(f, "f32.copysign"), F64Abs => write!(f, "f64.abs"), F64Neg => write!(f, "f64.neg"), F64Ceil => write!(f, "f64.ceil"), F64Floor => write!(f, "f64.floor"), F64Trunc => write!(f, "f64.trunc"), F64Nearest => write!(f, "f64.nearest"), F64Sqrt => write!(f, "f64.sqrt"), F64Add => write!(f, "f64.add"), F64Sub => write!(f, "f64.sub"), F64Mul => write!(f, "f64.mul"), F64Div => write!(f, "f64.div"), F64Min => write!(f, "f64.min"), F64Max => write!(f, "f64.max"), F64Copysign => write!(f, "f64.copysign"), I32WrapI64 => write!(f, "i32.wrap/i64"), I32TruncSF32 => write!(f, "i32.trunc_s/f32"), I32TruncUF32 => write!(f, "i32.trunc_u/f32"), I32TruncSF64 => write!(f, "i32.trunc_s/f64"), I32TruncUF64 => write!(f, "i32.trunc_u/f64"), I64ExtendSI32 => write!(f, "i64.extend_s/i32"), I64ExtendUI32 => write!(f, "i64.extend_u/i32"), I64TruncSF32 => write!(f, "i64.trunc_s/f32"), I64TruncUF32 => write!(f, "i64.trunc_u/f32"), I64TruncSF64 => write!(f, "i64.trunc_s/f64"), I64TruncUF64 => write!(f, "i64.trunc_u/f64"), F32ConvertSI32 => write!(f, "f32.convert_s/i32"), F32ConvertUI32 => write!(f, "f32.convert_u/i32"), F32ConvertSI64 => write!(f, "f32.convert_s/i64"), F32ConvertUI64 => write!(f, "f32.convert_u/i64"), F32DemoteF64 => write!(f, "f32.demote/f64"), F64ConvertSI32 => write!(f, "f64.convert_s/i32"), F64ConvertUI32 => write!(f, "f64.convert_u/i32"), F64ConvertSI64 => write!(f, "f64.convert_s/i64"), F64ConvertUI64 => write!(f, "f64.convert_u/i64"), F64PromoteF32 => write!(f, "f64.promote/f32"), I32ReinterpretF32 => write!(f, "i32.reinterpret/f32"), I64ReinterpretF64 => write!(f, "i64.reinterpret/f64"), F32ReinterpretI32 => write!(f, "f32.reinterpret/i32"), F64ReinterpretI64 => write!(f, "f64.reinterpret/i64"), #[cfg(feature = "sign_ext")] SignExt(ref i) => match i { SignExtInstruction::I32Extend8S => write!(f, "i32.extend8_s"), SignExtInstruction::I32Extend16S => write!(f, "i32.extend16_s"), SignExtInstruction::I64Extend8S => write!(f, "i64.extend8_s"), SignExtInstruction::I64Extend16S => write!(f, "i64.extend16_s"), SignExtInstruction::I64Extend32S => write!(f, "i64.extend32_s"), }, #[cfg(feature = "atomics")] Atomics(ref i) => i.fmt(f), #[cfg(feature = "simd")] Simd(ref i) => i.fmt(f), #[cfg(feature = "bulk")] Bulk(ref i) => i.fmt(f), #[cfg(feature = "tail_calls")] ReturnCall(index) => fmt_op!(f, "return_call", index), #[cfg(feature = "tail_calls")] ReturnCallIndirect(index, _) => fmt_op!(f, "return_call_indirect", index), } } } #[cfg(feature = "atomics")] impl fmt::Display for AtomicsInstruction { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use self::AtomicsInstruction::*; match *self { AtomicWake(_) => write!(f, "atomic.wake"), I32AtomicWait(_) => write!(f, "i32.atomic.wait"), I64AtomicWait(_) => write!(f, "i64.atomic.wait"), I32AtomicLoad(_) => write!(f, "i32.atomic.load"), I64AtomicLoad(_) => write!(f, "i64.atomic.load"), I32AtomicLoad8u(_) => write!(f, "i32.atomic.load8_u"), I32AtomicLoad16u(_) => write!(f, "i32.atomic.load16_u"), I64AtomicLoad8u(_) => write!(f, "i64.atomic.load8_u"), I64AtomicLoad16u(_) => write!(f, "i64.atomic.load16_u"), I64AtomicLoad32u(_) => write!(f, "i64.atomic.load32_u"), I32AtomicStore(_) => write!(f, "i32.atomic.store"), I64AtomicStore(_) => write!(f, "i64.atomic.store"), 
I32AtomicStore8u(_) => write!(f, "i32.atomic.store8_u"), I32AtomicStore16u(_) => write!(f, "i32.atomic.store16_u"), I64AtomicStore8u(_) => write!(f, "i64.atomic.store8_u"), I64AtomicStore16u(_) => write!(f, "i64.atomic.store16_u"), I64AtomicStore32u(_) => write!(f, "i64.atomic.store32_u"), I32AtomicRmwAdd(_) => write!(f, "i32.atomic.rmw.add"), I64AtomicRmwAdd(_) => write!(f, "i64.atomic.rmw.add"), I32AtomicRmwAdd8u(_) => write!(f, "i32.atomic.rmw8_u.add"), I32AtomicRmwAdd16u(_) => write!(f, "i32.atomic.rmw16_u.add"), I64AtomicRmwAdd8u(_) => write!(f, "i64.atomic.rmw8_u.add"), I64AtomicRmwAdd16u(_) => write!(f, "i64.atomic.rmw16_u.add"), I64AtomicRmwAdd32u(_) => write!(f, "i64.atomic.rmw32_u.add"), I32AtomicRmwSub(_) => write!(f, "i32.atomic.rmw.sub"), I64AtomicRmwSub(_) => write!(f, "i64.atomic.rmw.sub"), I32AtomicRmwSub8u(_) => write!(f, "i32.atomic.rmw8_u.sub"), I32AtomicRmwSub16u(_) => write!(f, "i32.atomic.rmw16_u.sub"), I64AtomicRmwSub8u(_) => write!(f, "i64.atomic.rmw8_u.sub"), I64AtomicRmwSub16u(_) => write!(f, "i64.atomic.rmw16_u.sub"), I64AtomicRmwSub32u(_) => write!(f, "i64.atomic.rmw32_u.sub"), I32AtomicRmwAnd(_) => write!(f, "i32.atomic.rmw.and"), I64AtomicRmwAnd(_) => write!(f, "i64.atomic.rmw.and"), I32AtomicRmwAnd8u(_) => write!(f, "i32.atomic.rmw8_u.and"), I32AtomicRmwAnd16u(_) => write!(f, "i32.atomic.rmw16_u.and"), I64AtomicRmwAnd8u(_) => write!(f, "i64.atomic.rmw8_u.and"), I64AtomicRmwAnd16u(_) => write!(f, "i64.atomic.rmw16_u.and"), I64AtomicRmwAnd32u(_) => write!(f, "i64.atomic.rmw32_u.and"), I32AtomicRmwOr(_) => write!(f, "i32.atomic.rmw.or"), I64AtomicRmwOr(_) => write!(f, "i64.atomic.rmw.or"), I32AtomicRmwOr8u(_) => write!(f, "i32.atomic.rmw8_u.or"), I32AtomicRmwOr16u(_) => write!(f, "i32.atomic.rmw16_u.or"), I64AtomicRmwOr8u(_) => write!(f, "i64.atomic.rmw8_u.or"), I64AtomicRmwOr16u(_) => write!(f, "i64.atomic.rmw16_u.or"), I64AtomicRmwOr32u(_) => write!(f, "i64.atomic.rmw32_u.or"), I32AtomicRmwXor(_) => write!(f, "i32.atomic.rmw.xor"), I64AtomicRmwXor(_) => write!(f, "i64.atomic.rmw.xor"), I32AtomicRmwXor8u(_) => write!(f, "i32.atomic.rmw8_u.xor"), I32AtomicRmwXor16u(_) => write!(f, "i32.atomic.rmw16_u.xor"), I64AtomicRmwXor8u(_) => write!(f, "i64.atomic.rmw8_u.xor"), I64AtomicRmwXor16u(_) => write!(f, "i64.atomic.rmw16_u.xor"), I64AtomicRmwXor32u(_) => write!(f, "i64.atomic.rmw32_u.xor"), I32AtomicRmwXchg(_) => write!(f, "i32.atomic.rmw.xchg"), I64AtomicRmwXchg(_) => write!(f, "i64.atomic.rmw.xchg"), I32AtomicRmwXchg8u(_) => write!(f, "i32.atomic.rmw8_u.xchg"), I32AtomicRmwXchg16u(_) => write!(f, "i32.atomic.rmw16_u.xchg"), I64AtomicRmwXchg8u(_) => write!(f, "i64.atomic.rmw8_u.xchg"), I64AtomicRmwXchg16u(_) => write!(f, "i64.atomic.rmw16_u.xchg"), I64AtomicRmwXchg32u(_) => write!(f, "i64.atomic.rmw32_u.xchg"), I32AtomicRmwCmpxchg(_) => write!(f, "i32.atomic.rmw.cmpxchg"), I64AtomicRmwCmpxchg(_) => write!(f, "i64.atomic.rmw.cmpxchg"), I32AtomicRmwCmpxchg8u(_) => write!(f, "i32.atomic.rmw8_u.cmpxchg"), I32AtomicRmwCmpxchg16u(_) => write!(f, "i32.atomic.rmw16_u.cmpxchg"), I64AtomicRmwCmpxchg8u(_) => write!(f, "i64.atomic.rmw8_u.cmpxchg"), I64AtomicRmwCmpxchg16u(_) => write!(f, "i64.atomic.rmw16_u.cmpxchg"), I64AtomicRmwCmpxchg32u(_) => write!(f, "i64.atomic.rmw32_u.cmpxchg"), } } } #[cfg(feature = "simd")] impl fmt::Display for SimdInstruction { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use self::SimdInstruction::*; match *self { V128Const(_) => write!(f, "v128.const"), V128Load(_) => write!(f, "v128.load"), V128Store(_) => write!(f, "v128.store"), 
I8x16Splat => write!(f, "i8x16.splat"), I16x8Splat => write!(f, "i16x8.splat"), I32x4Splat => write!(f, "i32x4.splat"), I64x2Splat => write!(f, "i64x2.splat"), F32x4Splat => write!(f, "f32x4.splat"), F64x2Splat => write!(f, "f64x2.splat"), I8x16ExtractLaneS(_) => write!(f, "i8x16.extract_lane_s"), I8x16ExtractLaneU(_) => write!(f, "i8x16.extract_lane_u"), I16x8ExtractLaneS(_) => write!(f, "i16x8.extract_lane_s"), I16x8ExtractLaneU(_) => write!(f, "i16x8.extract_lane_u"), I32x4ExtractLane(_) => write!(f, "i32x4.extract_lane"), I64x2ExtractLane(_) => write!(f, "i64x2.extract_lane"), F32x4ExtractLane(_) => write!(f, "f32x4.extract_lane"), F64x2ExtractLane(_) => write!(f, "f64x2.extract_lane"), I8x16ReplaceLane(_) => write!(f, "i8x16.replace_lane"), I16x8ReplaceLane(_) => write!(f, "i16x8.replace_lane"), I32x4ReplaceLane(_) => write!(f, "i32x4.replace_lane"), I64x2ReplaceLane(_) => write!(f, "i64x2.replace_lane"), F32x4ReplaceLane(_) => write!(f, "f32x4.replace_lane"), F64x2ReplaceLane(_) => write!(f, "f64x2.replace_lane"), V8x16Shuffle(_) => write!(f, "v8x16.shuffle"), I8x16Add => write!(f, "i8x16.add"), I16x8Add => write!(f, "i16x8.add"), I32x4Add => write!(f, "i32x4.add"), I64x2Add => write!(f, "i64x2.add"), I8x16Sub => write!(f, "i8x16.sub"), I16x8Sub => write!(f, "i16x8.sub"), I32x4Sub => write!(f, "i32x4.sub"), I64x2Sub => write!(f, "i64x2.sub"), I8x16Mul => write!(f, "i8x16.mul"), I16x8Mul => write!(f, "i16x8.mul"), I32x4Mul => write!(f, "i32x4.mul"), // I64x2Mul => write!(f, "i64x2.mul"), I8x16Neg => write!(f, "i8x16.neg"), I16x8Neg => write!(f, "i16x8.neg"), I32x4Neg => write!(f, "i32x4.neg"), I64x2Neg => write!(f, "i64x2.neg"), I8x16AddSaturateS => write!(f, "i8x16.add_saturate_s"), I8x16AddSaturateU => write!(f, "i8x16.add_saturate_u"), I16x8AddSaturateS => write!(f, "i16x8.add_saturate_S"), I16x8AddSaturateU => write!(f, "i16x8.add_saturate_u"), I8x16SubSaturateS => write!(f, "i8x16.sub_saturate_S"), I8x16SubSaturateU => write!(f, "i8x16.sub_saturate_u"), I16x8SubSaturateS => write!(f, "i16x8.sub_saturate_S"), I16x8SubSaturateU => write!(f, "i16x8.sub_saturate_u"), I8x16Shl => write!(f, "i8x16.shl"), I16x8Shl => write!(f, "i16x8.shl"), I32x4Shl => write!(f, "i32x4.shl"), I64x2Shl => write!(f, "i64x2.shl"), I8x16ShrS => write!(f, "i8x16.shr_s"), I8x16ShrU => write!(f, "i8x16.shr_u"), I16x8ShrS => write!(f, "i16x8.shr_s"), I16x8ShrU => write!(f, "i16x8.shr_u"), I32x4ShrS => write!(f, "i32x4.shr_s"), I32x4ShrU => write!(f, "i32x4.shr_u"), I64x2ShrS => write!(f, "i64x2.shr_s"), I64x2ShrU => write!(f, "i64x2.shr_u"), V128And => write!(f, "v128.and"), V128Or => write!(f, "v128.or"), V128Xor => write!(f, "v128.xor"), V128Not => write!(f, "v128.not"), V128Bitselect => write!(f, "v128.bitselect"), I8x16AnyTrue => write!(f, "i8x16.any_true"), I16x8AnyTrue => write!(f, "i16x8.any_true"), I32x4AnyTrue => write!(f, "i32x4.any_true"), I64x2AnyTrue => write!(f, "i64x2.any_true"), I8x16AllTrue => write!(f, "i8x16.all_true"), I16x8AllTrue => write!(f, "i16x8.all_true"), I32x4AllTrue => write!(f, "i32x4.all_true"), I64x2AllTrue => write!(f, "i64x2.all_true"), I8x16Eq => write!(f, "i8x16.eq"), I16x8Eq => write!(f, "i16x8.eq"), I32x4Eq => write!(f, "i32x4.eq"), // I64x2Eq => write!(f, "i64x2.eq"), F32x4Eq => write!(f, "f32x4.eq"), F64x2Eq => write!(f, "f64x2.eq"), I8x16Ne => write!(f, "i8x16.ne"), I16x8Ne => write!(f, "i16x8.ne"), I32x4Ne => write!(f, "i32x4.ne"), // I64x2Ne => write!(f, "i64x2.ne"), F32x4Ne => write!(f, "f32x4.ne"), F64x2Ne => write!(f, "f64x2.ne"), I8x16LtS => write!(f, 
"i8x16.lt_s"), I8x16LtU => write!(f, "i8x16.lt_u"), I16x8LtS => write!(f, "i16x8.lt_s"), I16x8LtU => write!(f, "i16x8.lt_u"), I32x4LtS => write!(f, "i32x4.lt_s"), I32x4LtU => write!(f, "i32x4.lt_u"), // I64x2LtS => write!(f, "// I64x2.lt_s"), // I64x2LtU => write!(f, "// I64x2.lt_u"), F32x4Lt => write!(f, "f32x4.lt"), F64x2Lt => write!(f, "f64x2.lt"), I8x16LeS => write!(f, "i8x16.le_s"), I8x16LeU => write!(f, "i8x16.le_u"), I16x8LeS => write!(f, "i16x8.le_s"), I16x8LeU => write!(f, "i16x8.le_u"), I32x4LeS => write!(f, "i32x4.le_s"), I32x4LeU => write!(f, "i32x4.le_u"), // I64x2LeS => write!(f, "// I64x2.le_s"), // I64x2LeU => write!(f, "// I64x2.le_u"), F32x4Le => write!(f, "f32x4.le"), F64x2Le => write!(f, "f64x2.le"), I8x16GtS => write!(f, "i8x16.gt_s"), I8x16GtU => write!(f, "i8x16.gt_u"), I16x8GtS => write!(f, "i16x8.gt_s"), I16x8GtU => write!(f, "i16x8.gt_u"), I32x4GtS => write!(f, "i32x4.gt_s"), I32x4GtU => write!(f, "i32x4.gt_u"), // I64x2GtS => write!(f, "// I64x2.gt_s"), // I64x2GtU => write!(f, "// I64x2.gt_u"), F32x4Gt => write!(f, "f32x4.gt"), F64x2Gt => write!(f, "f64x2.gt"), I8x16GeS => write!(f, "i8x16.ge_s"), I8x16GeU => write!(f, "i8x16.ge_u"), I16x8GeS => write!(f, "i16x8.ge_s"), I16x8GeU => write!(f, "i16x8.ge_u"), I32x4GeS => write!(f, "i32x4.ge_s"), I32x4GeU => write!(f, "i32x4.ge_u"), // I64x2GeS => write!(f, "// I64x2.ge_s"), // I64x2GeU => write!(f, "// I64x2.ge_u"), F32x4Ge => write!(f, "f32x4.ge"), F64x2Ge => write!(f, "f64x2.ge"), F32x4Neg => write!(f, "f32x4.neg"), F64x2Neg => write!(f, "f64x2.neg"), F32x4Abs => write!(f, "f32x4.abs"), F64x2Abs => write!(f, "f64x2.abs"), F32x4Min => write!(f, "f32x4.min"), F64x2Min => write!(f, "f64x2.min"), F32x4Max => write!(f, "f32x4.max"), F64x2Max => write!(f, "f64x2.max"), F32x4Add => write!(f, "f32x4.add"), F64x2Add => write!(f, "f64x2.add"), F32x4Sub => write!(f, "f32x4.sub"), F64x2Sub => write!(f, "f64x2.sub"), F32x4Div => write!(f, "f32x4.div"), F64x2Div => write!(f, "f64x2.div"), F32x4Mul => write!(f, "f32x4.mul"), F64x2Mul => write!(f, "f64x2.mul"), F32x4Sqrt => write!(f, "f32x4.sqrt"), F64x2Sqrt => write!(f, "f64x2.sqrt"), F32x4ConvertSI32x4 => write!(f, "f32x4.convert_s/i32x4"), F32x4ConvertUI32x4 => write!(f, "f32x4.convert_u/i32x4"), F64x2ConvertSI64x2 => write!(f, "f64x2.convert_s/i64x2"), F64x2ConvertUI64x2 => write!(f, "f64x2.convert_u/i64x2"), I32x4TruncSF32x4Sat => write!(f, "i32x4.trunc_s/f32x4:sat"), I32x4TruncUF32x4Sat => write!(f, "i32x4.trunc_u/f32x4:sat"), I64x2TruncSF64x2Sat => write!(f, "i64x2.trunc_s/f64x2:sat"), I64x2TruncUF64x2Sat => write!(f, "i64x2.trunc_u/f64x2:sat"), } } } #[cfg(feature = "bulk")] impl fmt::Display for BulkInstruction { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use self::BulkInstruction::*; match *self { MemoryInit(_) => write!(f, "memory.init"), MemoryDrop(_) => write!(f, "memory.drop"), MemoryFill => write!(f, "memory.fill"), MemoryCopy => write!(f, "memory.copy"), TableInit(_) => write!(f, "table.init"), TableDrop(_) => write!(f, "table.drop"), TableCopy => write!(f, "table.copy"), } } } impl Serialize for Instructions { type Error = Error; fn serialize<W: io::Write>(self, writer: &mut W) -> Result<(), Self::Error> { for op in self.0.into_iter() { op.serialize(writer)?; } Ok(()) } } impl Serialize for InitExpr { type Error = Error; fn serialize<W: io::Write>(self, writer: &mut W) -> Result<(), Self::Error> { for op in self.0.into_iter() { op.serialize(writer)?; } Ok(()) } } #[test] fn ifelse() { // see if-else.wast/if-else.wasm let instruction_list = 
super::deserialize_buffer::<Instructions>(&[ 0x04, 0x7F, 0x41, 0x05, 0x05, 0x41, 0x07, 0x0B, 0x0B, ]) .expect("valid hex of if instruction"); let instructions = instruction_list.elements(); match instructions[0] { Instruction::If(_) => (), _ => panic!("Should be deserialized as if instruction"), } let before_else = instructions .iter() .skip(1) .take_while(|op| !matches!(**op, Instruction::Else)) .count(); let after_else = instructions .iter() .skip(1) .skip_while(|op| !matches!(**op, Instruction::Else)) .take_while(|op| !matches!(**op, Instruction::End)) .count() - 1; // minus Instruction::Else itself assert_eq!(before_else, after_else); } #[test] fn display() { let instruction = Instruction::GetLocal(0); assert_eq!("get_local 0", format!("{}", instruction)); let instruction = Instruction::F64Store(0, 24); assert_eq!("f64.store offset=24", format!("{}", instruction)); let instruction = Instruction::I64Store(0, 0); assert_eq!("i64.store", format!("{}", instruction)); } #[test] fn size_off() { assert!(::std::mem::size_of::<Instruction>() <= 24); } #[test] fn instructions_hashset() { use self::Instruction::{Block, Call, Drop}; use super::types::{BlockType::Value, ValueType}; let set: std::collections::HashSet<Instruction> = vec![Call(1), Block(Value(ValueType::I32)), Drop].into_iter().collect(); assert!(set.contains(&Drop)); }
32.040106
98
0.664501
0a9c69b8cb6454d10bf1da1999bf85c922e3dc02
11,764
use merlin::Transcript;

use super::*;
use crate::merkle::*;

impl MerkleItem for u64 {
    fn commit(&self, t: &mut Transcript) {
        t.append_u64(b"test_item", *self);
    }
}

#[test]
fn empty_utreexo() {
    let hasher = NodeHasher::<u64>::new();
    let forest0 = Forest::new();
    assert_eq!(
        forest0.root(&hasher),
        MerkleTree::empty_root(b"ZkVM.utreexo")
    );
}

#[test]
fn transient_items_utreexo() {
    let hasher = NodeHasher::new();
    let forest0 = Forest::new();

    let (_, _forest1, _catchup) = forest0
        .update(&hasher, |forest| {
            forest.insert(&0, &hasher);
            forest.insert(&1, &hasher);

            forest
                .delete(&1, Proof::Transient, &hasher)
                .expect("just received proof should not fail");
            forest
                .delete(&0, Proof::Transient, &hasher)
                .expect("just received proof should not fail");

            // double spends are not allowed
            assert_eq!(
                forest.delete(&1, Proof::Transient, &hasher),
                Err(UtreexoError::InvalidProof)
            );
            assert_eq!(
                forest.delete(&0, Proof::Transient, &hasher),
                Err(UtreexoError::InvalidProof)
            );

            Ok(())
        })
        .unwrap();
}

#[test]
fn insert_to_utreexo() {
    let hasher = NodeHasher::new();
    let forest0 = Forest::new();

    let (_, forest1, catchup1) = forest0
        .update(&hasher, |forest| {
            for i in 0..6 {
                forest.insert(&i, &hasher);
            }
            Ok(())
        })
        .expect("cannot fail");

    assert_eq!(
        forest1.root(&hasher),
        MerkleTree::root(b"ZkVM.utreexo", 0..6)
    );

    // update the proofs
    let proofs1 = (0..6)
        .map(|i| {
            catchup1
                .update_proof(&(i as u64), Proof::Transient, &hasher)
                .unwrap()
        })
        .collect::<Vec<_>>();

    // after the proofs were updated, deletions should succeed
    let _ = forest1
        .update(&hasher, |forest| {
            for i in 0..6u64 {
                forest.delete(&i, &proofs1[i as usize], &hasher)?;
            }
            Ok(())
        })
        .expect("all proofs must be valid");
}

#[test]
fn transaction_success() {
    let hasher = NodeHasher::new();
    let forest0 = Forest::new();

    let (_, forest1, catchup1) = forest0
        .update(&hasher, |forest| {
            for i in 0..6 {
                forest.insert(&i, &hasher);
            }
            Ok(())
        })
        .expect("cannot fail");

    // update the proofs
    let proofs1 = (0..6)
        .map(|i| {
            catchup1
                .update_proof(&(i as u64), Proof::Transient, &hasher)
                .unwrap()
        })
        .collect::<Vec<_>>();

    dbg!(proofs1.clone());

    let proofs1 = proofs1
        .into_iter()
        .enumerate()
        .map(|(i, p)| catchup1.update_proof(&(i as u64), p, &hasher).unwrap())
        .collect::<Vec<_>>();

    dbg!(proofs1.clone());

    //   d
    //   |\
    //   a   b   c
    //   |\  |\  |\
    //   0 1 2 3 4 5

    // We want to do several changes outside a transaction, then do a succeeding
    // transaction and check that both the earlier changes and the transaction's
    // changes were applied.
    let mut wf = forest1.work_forest();

    wf.insert(&6, &hasher);
    wf.delete(&0, &proofs1[0], &hasher)
        .expect("Should not fail.");

    //   d
    //   |\
    //   a   b   c   new
    //   |\  |\  |\  |
    //   x 1 2 3 4 5 6

    match wf.transaction::<_, (), ()>(|wf| {
        wf.insert(&7, &hasher);
        wf.insert(&8, &hasher);
        wf.delete(&7, &Proof::Transient, &hasher)
            .expect("Should not fail.");
        wf.delete(&1, &proofs1[1], &hasher)
            .expect("Should not fail.");
        Ok(())
    }) {
        Err(_) => {}
        Ok(_) => {}
    };

    let (new_forest, _) = wf.normalize(&hasher);

    //   d
    //   |\
    //   a   b   c   new
    //   |\  |\  |\  |\
    //   x x 2 3 4 5 6 8

    assert_eq!(
        new_forest.root(&hasher),
        MerkleTree::root(b"ZkVM.utreexo", &[2, 3, 4, 5, 6, 8])
    );
}

#[test]
fn transaction_fail() {
    let hasher = NodeHasher::new();
    let forest0 = Forest::new();

    let (_, forest1, catchup1) = forest0
        .update(&hasher, |forest| {
            for i in 0..6 {
                forest.insert(&i, &hasher);
            }
            Ok(())
        })
        .expect("cannot fail");

    // update the proofs
    let proofs1 = (0..6)
        .map(|i| {
            catchup1
                .update_proof(&(i as u64), Proof::Transient, &hasher)
                .unwrap()
        })
        .collect::<Vec<_>>();

    //   d
    //   |\
    //   a   b   c
    //   |\  |\  |\
    //   0 1 2 3 4 5

    // We want to do several changes that would succeed, then do a failing transaction
    // and check that all pre-transaction changes were respected.
    let mut wf = forest1.work_forest();

    wf.insert(&6, &hasher);
    wf.delete(&0, &proofs1[0], &hasher)
        .expect("Should not fail.");

    //   d
    //   |\
    //   a   b   c   new
    //   |\  |\  |\  |
    //   x 1 2 3 4 5 6

    match wf.transaction::<_, (), ()>(|wf| {
        wf.insert(&7, &hasher);
        wf.insert(&8, &hasher);
        wf.delete(&7, &Proof::Transient, &hasher)
            .expect("Should not fail.");
        wf.delete(&1, &proofs1[1], &hasher)
            .expect("Should not fail.");
        Err(())
    }) {
        Err(_) => {}
        Ok(_) => {}
    };

    let (new_forest, _) = wf.normalize(&hasher);

    // Should contain only the changes before transaction
    //   d                                     f
    //   |\                                    | \
    //   a   b   c   new   ->   b   c      ->  b   c   h
    //   |\  |\  |\  |          |\  |\         |\  |\  |\
    //   x 1 2 3 4 5 6      x 1 2 3 4 5 6      2 3 4 5 1 6

    assert_eq!(
        new_forest.root(&hasher),
        MerkleTree::root(b"ZkVM.utreexo", &[2, 3, 4, 5, 1, 6])
    );
}

#[test]
fn insert_and_delete_utreexo() {
    let n = 6u64;
    let hasher = NodeHasher::new();
    let forest0 = Forest::new();

    let (_, forest1, catchup1) = forest0
        .update(&hasher, |forest| {
            for i in 0..n {
                forest.insert(&i, &hasher);
            }
            Ok(())
        })
        .expect("cannot fail");

    // update the proofs
    let proofs1 = (0..n)
        .map(|i| {
            catchup1
                .update_proof(&(i as u64), Proof::Transient, &hasher)
                .unwrap()
        })
        .collect::<Vec<_>>();

    // after the proofs were updated, deletions should succeed
    forest1
        .verify(&0u64, &proofs1[0], &hasher)
        .expect("proof should be valid");
    forest1
        .verify(&5u64, &proofs1[5], &hasher)
        .expect("proof should be valid");

    fn verify_update<M: MerkleItem>(
        forest: &Forest,
        new_set: &[M],
        upd: impl FnOnce(&mut WorkForest),
    ) -> (Forest, Catchup) {
        let hasher = NodeHasher::<M>::new();
        let (_, forest2, catchup2) = forest
            .update(&hasher, |forest| {
                upd(forest);
                Ok(())
            })
            .unwrap();

        assert_eq!(
            forest2.root(&hasher),
            MerkleTree::root(b"ZkVM.utreexo", new_set)
        );

        (forest2, catchup2)
    }

    // delete 0:
    //   d                                   e
    //   |\                                  | \
    //   a   b   c    ->      b   c      ->  b   c
    //   |\  |\  |\           |\  |\         |\  |\
    //   0 1 2 3 4 5    x 1   2 3 4 5        2 3 4 5 1
    forest1.verify(&0u64, &proofs1[0], &hasher).unwrap();
    let (_, _) = verify_update(&forest1, &[2, 3, 4, 5, 1], |forest| {
        forest.delete(&0u64, &proofs1[0], &hasher).unwrap();
    });

    // delete 1:
    //   d                                   e
    //   |\                                  | \
    //   a   b   c    ->      b   c      ->  b   c
    //   |\  |\  |\           |\  |\         |\  |\
    //   0 1 2 3 4 5    0 x   2 3 4 5        2 3 4 5 0
    forest1.verify(&1u64, &proofs1[1], &hasher).unwrap();
    let (_, _) = verify_update(&forest1, &[2, 3, 4, 5, 0], |forest| {
        forest.delete(&1u64, &proofs1[1], &hasher).unwrap();
    });

    // delete 2:
    //   d                                   e
    //   |\                                  | \
    //   a   b   c    ->    a       c    ->  a   c
    //   |\  |\  |\         |\      |\       |\  |\
    //   0 1 2 3 4 5        0 1 x 3 4 5      0 1 4 5 3
    let (_, _) = verify_update(&forest1, &[0, 1, 4, 5, 3], |forest| {
        forest.delete(&2u64, &proofs1[2], &hasher).unwrap();
    });

    // delete 5:
    //   d                                   e
    //   |\                                  | \
    //   a   b   c    ->    a   b        ->  a   b
    //   |\  |\  |\         |\  |\           |\  |\
    //   0 1 2 3 4 5        0 1 2 3 4 x      0 1 2 3 4
    let (_, _) = verify_update(&forest1, &[0, 1, 2, 3, 4], |forest| {
        forest.delete(&5u64, &proofs1[5], &hasher).unwrap();
    });

    // delete 2,3:
    //   d                                   e
    //   |\                                  | \
    //   a   b   c    ->    a       c    ->  a   c
    //   |\  |\  |\         |\      |\       |\  |\
    //   0 1 2 3 4 5        0 1 x x 4 5      0 1 4 5
    let (_, _) = verify_update(&forest1, &[0, 1, 4, 5], |forest| {
        forest.delete(&2u64, &proofs1[2], &hasher).unwrap();
        forest.delete(&3u64, &proofs1[3], &hasher).unwrap();
    });

    // delete in another order
    let (_, _) = verify_update(&forest1, &[0, 1, 4, 5], |forest| {
        forest.delete(&3u64, &proofs1[3], &hasher).unwrap();
        forest.delete(&2u64, &proofs1[2], &hasher).unwrap();
    });

    // delete 0,3:
    //   d                                   f
    //   |\                                  | \
    //   a   b   c    ->            c    ->  e   c
    //   |\  |\  |\                 |\       |\  |\
    //   0 1 2 3 4 5        x 1 2 x 4 5      1 2 4 5
    let (_, _) = verify_update(&forest1, &[1, 2, 4, 5], |forest| {
        forest.delete(&0u64, &proofs1[0], &hasher).unwrap();
        forest.delete(&3u64, &proofs1[3], &hasher).unwrap();
    });

    // delete 0, insert 6, 7:
    //   d                                       f
    //   |\                                      | \
    //   a   b   c    ->      b   c          ->  b   c   h
    //   |\  |\  |\           |\  |\             |\  |\  |\
    //   0 1 2 3 4 5    x 1   2 3 4 5 6 7        2 3 4 5 1 6 7
    let (forest2, catchup) = verify_update(&forest1, &[2, 3, 4, 5, 1, 6, 7], |forest| {
        forest.delete(&0u64, &proofs1[0], &hasher).unwrap();
        forest.insert(&6u64, &hasher);
        forest.insert(&7u64, &hasher);
    });

    let proof7 = catchup
        .update_proof(&7u64, Proof::Transient, &hasher)
        .unwrap();

    let proof2 = catchup
        .update_proof(&2u64, proofs1[2].clone(), &hasher)
        .unwrap();

    // delete 2, 7:
    //   f                  f                  g
    //   | \                | \                | \
    //   b   c   h    ->    b   c   h    ->    c   h
    //   |\  |\  |\         |\  |\  |\         |\  |\
    //   2 3 4 5 1 6 7      x 3 4 5 1 6 x      4 5 1 6 3
    //
    let (_forest2, _catchup) = verify_update(&forest2, &[4, 5, 1, 6, 3], |forest| {
        forest.delete(&2u64, &proof2, &hasher).unwrap();
        forest.delete(&7u64, &proof7, &hasher).unwrap();
    });
}
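// A distilled sketch of the proof lifecycle the tests above exercise, using the
// same API calls verbatim (the test-only MerkleItem impl for u64 is assumed in
// scope; unwraps mirror the tests, not production error handling).
fn single_item_lifecycle() {
    let hasher = NodeHasher::<u64>::new();
    // Inserting and normalizing yields a new forest plus a Catchup structure.
    let (_, forest, catchup) = Forest::new()
        .update(&hasher, |f| {
            f.insert(&42u64, &hasher);
            Ok(())
        })
        .unwrap();
    // The Catchup upgrades the item's transient proof to a Merkle path...
    let proof = catchup
        .update_proof(&42u64, Proof::Transient, &hasher)
        .unwrap();
    // ...which then verifies against (or can be spent from) the new forest.
    forest.verify(&42u64, &proof, &hasher).unwrap();
}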
29.933842
87
0.407004
ffa51a8a072bcf711efd5b586cc90e7890572711
261
pub(crate) fn parse_as_mac(mac_len: usize, data: &[u8]) -> String {
    let mut rt = String::new();
    for i in 0..mac_len {
        rt.push_str(&format!("{:02X}", data[i]));
        if i != mac_len - 1 {
            rt.push_str(":");
        }
    }
    rt
}
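// A quick sanity check for the formatter above (hypothetical test, assuming
// `parse_as_mac` is in scope): "{:02X}" yields two upper-case hex digits per
// byte, with ':' between all bytes but the last.
#[test]
fn parse_as_mac_formats_six_bytes() {
    let raw = [0xAAu8, 0xBB, 0xCC, 0x00, 0x11, 0x22];
    assert_eq!(parse_as_mac(raw.len(), &raw), "AA:BB:CC:00:11:22");
}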
23.727273
67
0.475096
11b0a8f12e0a6fa02d7b8ffd35a734ab0b404b3f
2,277
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct ListNode {
    pub val: i32,
    pub next: Option<Box<ListNode>>,
}

impl ListNode {
    #[inline]
    fn new(val: i32) -> Self {
        ListNode { next: None, val }
    }
}

struct Solution {}

impl Solution {
    pub fn add_two_numbers(l1: Option<Box<ListNode>>, l2: Option<Box<ListNode>>) -> Option<Box<ListNode>> {
        let (mut lhs, mut rhs) = (l1, l2);
        let mut res: Option<Box<ListNode>> = Some(Box::new(ListNode::new(0)));
        let (mut tmp, mut sum) = (&mut res, 0);
        let (mut lhs_tag, mut rhs_tag) = (lhs.is_some(), rhs.is_some());
        while lhs_tag || rhs_tag || sum > 0 {
            if lhs_tag {
                sum += lhs.as_ref().unwrap().val;
                lhs = lhs.unwrap().next;
                lhs_tag = lhs.is_some();
            }
            if rhs_tag {
                sum += rhs.as_ref().unwrap().val;
                rhs = rhs.unwrap().next;
                rhs_tag = rhs.is_some();
            }
            tmp.as_mut().unwrap().next = Some(Box::new(ListNode::new(sum % 10)));
            tmp = &mut tmp.as_mut().unwrap().next;
            sum /= 10;
        }
        res.unwrap().next
    }
}

fn main() {
    let mut first = Some(Box::new(ListNode::new(3)));
    first.as_mut().unwrap().next = None;
    let mut second = Some(Box::new(ListNode::new(4)));
    second.as_mut().unwrap().next = first;
    let mut left = Some(Box::new(ListNode::new(2)));
    left.as_mut().unwrap().next = second;
    println!("left:{:?}", left);

    let mut first = Some(Box::new(ListNode::new(4)));
    first.as_mut().unwrap().next = None;
    let mut second = Some(Box::new(ListNode::new(6)));
    second.as_mut().unwrap().next = first;
    let mut right = Some(Box::new(ListNode::new(5)));
    right.as_mut().unwrap().next = second;
    println!("right:{:?}", right);

    let mut first = Some(Box::new(ListNode::new(8)));
    first.as_mut().unwrap().next = None;
    let mut second = Some(Box::new(ListNode::new(0)));
    second.as_mut().unwrap().next = first;
    let mut res = Some(Box::new(ListNode::new(7)));
    res.as_mut().unwrap().next = second;
    println!("res{:?}", res);

    assert_eq!(res, Solution::add_two_numbers(left, right));
}
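// Hypothetical helper, not part of the original solution: builds a list from a
// slice so the manual Box-chaining in main() collapses to one call per list,
// e.g. from_slice(&[2, 4, 3]) produces 2 -> 4 -> 3, the first addend above.
// Assumes the ListNode definition from the file is in scope.
fn from_slice(values: &[i32]) -> Option<Box<ListNode>> {
    let mut head = None;
    // Walk the slice backwards so each new node can own the list built so far.
    for &val in values.iter().rev() {
        let mut node = Box::new(ListNode::new(val));
        node.next = head;
        head = Some(node);
    }
    head
}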
31.191781
107
0.537989
282d29c41b702d952908ed35b5d05bc2db25e243
6,085
use std::collections::VecDeque; use std::pin::Pin; use std::task::{Context, Poll}; use actori::dev::{ AsyncContextParts, ContextFut, ContextParts, Envelope, Mailbox, ToEnvelope, }; use actori::fut::ActorFuture; use actori::{ Actor, ActorContext, ActorState, Addr, AsyncContext, Handler, Message, SpawnHandle, }; use actori_web::error::Error; use bytes::Bytes; use futures::channel::oneshot::Sender; use futures::{Future, Stream}; /// Execution context for http actors pub struct HttpContext<A> where A: Actor<Context = HttpContext<A>>, { inner: ContextParts<A>, stream: VecDeque<Option<Bytes>>, } impl<A> ActorContext for HttpContext<A> where A: Actor<Context = Self>, { fn stop(&mut self) { self.inner.stop(); } fn terminate(&mut self) { self.inner.terminate() } fn state(&self) -> ActorState { self.inner.state() } } impl<A> AsyncContext<A> for HttpContext<A> where A: Actor<Context = Self>, { #[inline] fn spawn<F>(&mut self, fut: F) -> SpawnHandle where F: ActorFuture<Output = (), Actor = A> + 'static, { self.inner.spawn(fut) } #[inline] fn wait<F>(&mut self, fut: F) where F: ActorFuture<Output = (), Actor = A> + 'static, { self.inner.wait(fut) } #[doc(hidden)] #[inline] fn waiting(&self) -> bool { self.inner.waiting() || self.inner.state() == ActorState::Stopping || self.inner.state() == ActorState::Stopped } #[inline] fn cancel_future(&mut self, handle: SpawnHandle) -> bool { self.inner.cancel_future(handle) } #[inline] fn address(&self) -> Addr<A> { self.inner.address() } } impl<A> HttpContext<A> where A: Actor<Context = Self>, { #[inline] /// Create a new HTTP Context from a request and an actor pub fn create(actor: A) -> impl Stream<Item = Result<Bytes, Error>> { let mb = Mailbox::default(); let ctx = HttpContext { inner: ContextParts::new(mb.sender_producer()), stream: VecDeque::new(), }; HttpContextFut::new(ctx, actor, mb) } /// Create a new HTTP Context pub fn with_factory<F>(f: F) -> impl Stream<Item = Result<Bytes, Error>> where F: FnOnce(&mut Self) -> A + 'static, { let mb = Mailbox::default(); let mut ctx = HttpContext { inner: ContextParts::new(mb.sender_producer()), stream: VecDeque::new(), }; let act = f(&mut ctx); HttpContextFut::new(ctx, act, mb) } } impl<A> HttpContext<A> where A: Actor<Context = Self>, { /// Write payload #[inline] pub fn write(&mut self, data: Bytes) { self.stream.push_back(Some(data)); } /// Indicate end of streaming payload. Also this method calls `Self::close`. #[inline] pub fn write_eof(&mut self) { self.stream.push_back(None); } /// Handle of the running future /// /// SpawnHandle is the handle returned by `AsyncContext::spawn()` method. 
pub fn handle(&self) -> SpawnHandle { self.inner.curr_handle() } } impl<A> AsyncContextParts<A> for HttpContext<A> where A: Actor<Context = Self>, { fn parts(&mut self) -> &mut ContextParts<A> { &mut self.inner } } struct HttpContextFut<A> where A: Actor<Context = HttpContext<A>>, { fut: ContextFut<A, HttpContext<A>>, } impl<A> HttpContextFut<A> where A: Actor<Context = HttpContext<A>>, { fn new(ctx: HttpContext<A>, act: A, mailbox: Mailbox<A>) -> Self { let fut = ContextFut::new(ctx, act, mailbox); HttpContextFut { fut } } } impl<A> Stream for HttpContextFut<A> where A: Actor<Context = HttpContext<A>>, { type Item = Result<Bytes, Error>; fn poll_next( mut self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll<Option<Self::Item>> { if self.fut.alive() { let _ = Pin::new(&mut self.fut).poll(cx); } // frames if let Some(data) = self.fut.ctx().stream.pop_front() { Poll::Ready(data.map(|b| Ok(b))) } else if self.fut.alive() { Poll::Pending } else { Poll::Ready(None) } } } impl<A, M> ToEnvelope<A, M> for HttpContext<A> where A: Actor<Context = HttpContext<A>> + Handler<M>, M: Message + Send + 'static, M::Result: Send, { fn pack(msg: M, tx: Option<Sender<M::Result>>) -> Envelope<A> { Envelope::new(msg, tx) } } #[cfg(test)] mod tests { use std::time::Duration; use actori::Actor; use actori_web::http::StatusCode; use actori_web::test::{call_service, init_service, read_body, TestRequest}; use actori_web::{web, App, HttpResponse}; use bytes::Bytes; use super::*; struct MyActor { count: usize, } impl Actor for MyActor { type Context = HttpContext<Self>; fn started(&mut self, ctx: &mut Self::Context) { ctx.run_later(Duration::from_millis(100), |slf, ctx| slf.write(ctx)); } } impl MyActor { fn write(&mut self, ctx: &mut HttpContext<Self>) { self.count += 1; if self.count > 3 { ctx.write_eof() } else { ctx.write(Bytes::from(format!("LINE-{}", self.count))); ctx.run_later(Duration::from_millis(100), |slf, ctx| slf.write(ctx)); } } } #[actori_rt::test] async fn test_default_resource() { let mut srv = init_service(App::new().service(web::resource("/test").to(|| { HttpResponse::Ok().streaming(HttpContext::create(MyActor { count: 0 })) }))) .await; let req = TestRequest::with_uri("/test").to_request(); let resp = call_service(&mut srv, req).await; assert_eq!(resp.status(), StatusCode::OK); let body = read_body(resp).await; assert_eq!(body, Bytes::from_static(b"LINE-1LINE-2LINE-3")); } }
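// Hedged usage sketch (not part of the crate): wiring HttpContext::create into
// a handler exactly as the embedded test does, with the test's MyActor assumed
// in scope; the returned stream becomes the chunked HTTP response body.
async fn stream_actor_body() -> actori_web::HttpResponse {
    actori_web::HttpResponse::Ok().streaming(HttpContext::create(MyActor { count: 0 }))
}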
24.437751
87
0.569597
89146b4dd2c9bd5f58b61a89a2cab4b61f7b6d76
5,873
use clippy_utils::diagnostics::span_lint_and_help;
use clippy_utils::ty::{is_must_use_ty, match_type};
use clippy_utils::{is_must_use_func_call, paths};
use if_chain::if_chain;
use rustc_hir::{Local, PatKind};
use rustc_lint::{LateContext, LateLintPass};
use rustc_middle::lint::in_external_macro;
use rustc_middle::ty::subst::GenericArgKind;
use rustc_session::{declare_lint_pass, declare_tool_lint};

declare_clippy_lint! {
    /// ### What it does
    /// Checks for `let _ = <expr>`
    /// where expr is #[must_use]
    ///
    /// ### Why is this bad?
    /// It's better to explicitly
    /// handle the value of a #[must_use] expr
    ///
    /// ### Example
    /// ```rust
    /// fn f() -> Result<u32, u32> {
    ///     Ok(0)
    /// }
    ///
    /// let _ = f();
    /// // is_ok() is marked #[must_use]
    /// let _ = f().is_ok();
    /// ```
    pub LET_UNDERSCORE_MUST_USE,
    restriction,
    "non-binding let on a `#[must_use]` expression"
}

declare_clippy_lint! {
    /// ### What it does
    /// Checks for `let _ = sync_lock`
    ///
    /// ### Why is this bad?
    /// This statement immediately drops the lock instead of
    /// extending its lifetime to the end of the scope, which is often not intended.
    /// To extend lock lifetime to the end of the scope, use an underscore-prefixed
    /// name instead (i.e. _lock). If you want to explicitly drop the lock,
    /// `std::mem::drop` conveys your intention better and is less error-prone.
    ///
    /// ### Example
    ///
    /// Bad:
    /// ```rust,ignore
    /// let _ = mutex.lock();
    /// ```
    ///
    /// Good:
    /// ```rust,ignore
    /// let _lock = mutex.lock();
    /// ```
    pub LET_UNDERSCORE_LOCK,
    correctness,
    "non-binding let on a synchronization lock"
}

declare_clippy_lint! {
    /// ### What it does
    /// Checks for `let _ = <expr>`
    /// where expr has a type that implements `Drop`
    ///
    /// ### Why is this bad?
    /// This statement immediately drops the initializer
    /// expression instead of extending its lifetime to the end of the scope, which
    /// is often not intended. To extend the expression's lifetime to the end of the
    /// scope, use an underscore-prefixed name instead (i.e. _var). If you want to
    /// explicitly drop the expression, `std::mem::drop` conveys your intention
    /// better and is less error-prone.
    ///
    /// ### Example
    ///
    /// Bad:
    /// ```rust,ignore
    /// struct Droppable;
    /// impl Drop for Droppable {
    ///     fn drop(&mut self) {}
    /// }
    /// {
    ///     let _ = Droppable;
    ///     //      ^ dropped here
    ///     /* more code */
    /// }
    /// ```
    ///
    /// Good:
    /// ```rust,ignore
    /// {
    ///     let _droppable = Droppable;
    ///     /* more code */
    ///     // dropped at end of scope
    /// }
    /// ```
    pub LET_UNDERSCORE_DROP,
    pedantic,
    "non-binding let on a type that implements `Drop`"
}

declare_lint_pass!(LetUnderscore => [LET_UNDERSCORE_MUST_USE, LET_UNDERSCORE_LOCK, LET_UNDERSCORE_DROP]);

const SYNC_GUARD_PATHS: [&[&str]; 3] = [
    &paths::MUTEX_GUARD,
    &paths::RWLOCK_READ_GUARD,
    &paths::RWLOCK_WRITE_GUARD,
];

impl<'tcx> LateLintPass<'tcx> for LetUnderscore {
    fn check_local(&mut self, cx: &LateContext<'_>, local: &Local<'_>) {
        if in_external_macro(cx.tcx.sess, local.span) {
            return;
        }

        if_chain! {
            if let PatKind::Wild = local.pat.kind;
            if let Some(init) = local.init;
            then {
                let init_ty = cx.typeck_results().expr_ty(init);
                let contains_sync_guard = init_ty.walk(cx.tcx).any(|inner| match inner.unpack() {
                    GenericArgKind::Type(inner_ty) => {
                        SYNC_GUARD_PATHS.iter().any(|path| match_type(cx, inner_ty, path))
                    },
                    GenericArgKind::Lifetime(_) | GenericArgKind::Const(_) => false,
                });
                if contains_sync_guard {
                    span_lint_and_help(
                        cx,
                        LET_UNDERSCORE_LOCK,
                        local.span,
                        "non-binding let on a synchronization lock",
                        None,
                        "consider using an underscore-prefixed named \
                            binding or dropping explicitly with `std::mem::drop`"
                    );
                } else if init_ty.needs_drop(cx.tcx, cx.param_env) {
                    span_lint_and_help(
                        cx,
                        LET_UNDERSCORE_DROP,
                        local.span,
                        "non-binding `let` on a type that implements `Drop`",
                        None,
                        "consider using an underscore-prefixed named \
                            binding or dropping explicitly with `std::mem::drop`"
                    );
                } else if is_must_use_ty(cx, cx.typeck_results().expr_ty(init)) {
                    span_lint_and_help(
                        cx,
                        LET_UNDERSCORE_MUST_USE,
                        local.span,
                        "non-binding let on an expression with `#[must_use]` type",
                        None,
                        "consider explicitly using expression value"
                    );
                } else if is_must_use_func_call(cx, init) {
                    span_lint_and_help(
                        cx,
                        LET_UNDERSCORE_MUST_USE,
                        local.span,
                        "non-binding let on a result of a `#[must_use]` function",
                        None,
                        "consider explicitly using function result"
                    );
                }
            }
        }
    }
}
34.145349
105
0.514558
2650ec2dcbcfd71580d78241124da37b8aa50dc2
1,488
use rapier2d::prelude::*;
use rapier_testbed2d::Testbed;

pub fn init_world(testbed: &mut Testbed) {
    /*
     * World
     */
    let mut bodies = RigidBodySet::new();
    let mut colliders = ColliderSet::new();
    let impulse_joints = ImpulseJointSet::new();
    let multibody_joints = MultibodyJointSet::new();

    /*
     * Ground
     */
    let ground_size = 20.;
    let ground_height = 1.0;

    let rigid_body = RigidBodyBuilder::new_static()
        .translation(vector![0.0, -ground_height])
        .build();
    let handle = bodies.insert(rigid_body);
    let collider = ColliderBuilder::cuboid(ground_size, ground_height)
        .restitution(1.0)
        .build();
    colliders.insert_with_parent(collider, handle, &mut bodies);

    let num = 10;
    let rad = 0.5;

    for j in 0..2 {
        for i in 0..=num {
            let x = (i as f32) - num as f32 / 2.0;

            let rigid_body = RigidBodyBuilder::new_dynamic()
                .translation(vector![x * 2.0, 10.0 * (j as f32 + 1.0)])
                .build();
            let handle = bodies.insert(rigid_body);
            let collider = ColliderBuilder::ball(rad)
                .restitution((i as f32) / (num as f32))
                .build();
            colliders.insert_with_parent(collider, handle, &mut bodies);
        }
    }

    /*
     * Set up the testbed.
     */
    testbed.set_world(bodies, colliders, impulse_joints, multibody_joints);
    testbed.look_at(point![0.0, 1.0], 25.0);
}
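// Sketch of the restitution ramp used above: ball i in 0..=num gets i/num, so
// each row of balls spans perfectly inelastic (0.0) on the left to perfectly
// elastic (1.0) on the right. Values are the usual f32 approximations.
fn restitution_ramp(num: usize) -> Vec<f32> {
    (0..=num).map(|i| i as f32 / num as f32).collect()
}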
29.176471
75
0.576613
71429af18797b8ac198905b5f94498a6096470de
304
use super::*;

pub(crate) trait Cast {
    fn cast<T: JsCast>(self) -> Result<T>;
}

impl<V: JsCast + std::fmt::Debug> Cast for V {
    fn cast<T: JsCast>(self) -> Result<T> {
        Ok(
            self
                .dyn_into::<T>()
                .map_err(|value| format!("`cast` failed for value: {:?}", value))?,
        )
    }
}
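// The same fallible-downcast shape in plain std Rust, for illustration only:
// the trait above targets wasm-bindgen's JsCast (via dyn_into), which needs a
// wasm/browser target, while Box<dyn Any>::downcast runs anywhere.
use std::any::Any;

fn cast_box<T: 'static>(value: Box<dyn Any>) -> Result<Box<T>, String> {
    value
        .downcast::<T>()
        .map_err(|_| "`cast` failed: value had a different runtime type".to_string())
}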
19
75
0.523026
28de255ff2312ec25b8bfc5c3617d5aae1413c86
4,187
use futures_util::future::TryFutureExt;
use lazy_static::lazy_static;
use rustls::internal::pemfile::{certs, rsa_private_keys};
use rustls::{ClientConfig, ServerConfig};
use std::io::{BufReader, Cursor};
use std::net::SocketAddr;
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::{io, thread};
use tokio::io::{copy, split, AsyncReadExt, AsyncWriteExt};
use tokio::net::{TcpListener, TcpStream};
use tokio::runtime;
use tokio_rustls::{TlsAcceptor, TlsConnector};

const CERT: &str = include_str!("end.cert");
const CHAIN: &str = include_str!("end.chain");
const RSA: &str = include_str!("end.rsa");

lazy_static! {
    static ref TEST_SERVER: (SocketAddr, &'static str, &'static str) = {
        let cert = certs(&mut BufReader::new(Cursor::new(CERT))).unwrap();
        let mut keys = rsa_private_keys(&mut BufReader::new(Cursor::new(RSA))).unwrap();

        let mut config = ServerConfig::new(rustls::NoClientAuth::new());
        config
            .set_single_cert(cert, keys.pop().unwrap())
            .expect("invalid key or certificate");
        let acceptor = TlsAcceptor::from(Arc::new(config));

        let (send, recv) = channel();

        thread::spawn(move || {
            let runtime = runtime::Builder::new_current_thread()
                .enable_io()
                .build()
                .unwrap();
            let runtime = Arc::new(runtime);
            let runtime2 = runtime.clone();

            let done = async move {
                let addr = SocketAddr::from(([127, 0, 0, 1], 0));
                let listener = TcpListener::bind(&addr).await?;
                send.send(listener.local_addr()?).unwrap();

                loop {
                    let (stream, _) = listener.accept().await?;
                    let acceptor = acceptor.clone();

                    let fut = async move {
                        let stream = acceptor.accept(stream).await?;

                        let (mut reader, mut writer) = split(stream);
                        copy(&mut reader, &mut writer).await?;

                        Ok(()) as io::Result<()>
                    }
                    .unwrap_or_else(|err| eprintln!("server: {:?}", err));

                    runtime2.spawn(fut);
                }
            }
            .unwrap_or_else(|err: io::Error| eprintln!("server: {:?}", err));

            runtime.block_on(done);
        });

        let addr = recv.recv().unwrap();
        (addr, "testserver.com", CHAIN)
    };
}

fn start_server() -> &'static (SocketAddr, &'static str, &'static str) {
    &*TEST_SERVER
}

async fn start_client(addr: SocketAddr, domain: &str, config: Arc<ClientConfig>) -> io::Result<()> {
    const FILE: &[u8] = include_bytes!("../README.md");

    let domain = webpki::DNSNameRef::try_from_ascii_str(domain).unwrap();
    let config = TlsConnector::from(config);
    let mut buf = vec![0; FILE.len()];

    let stream = TcpStream::connect(&addr).await?;
    let mut stream = config.connect(domain, stream).await?;
    stream.write_all(FILE).await?;
    stream.flush().await?;
    stream.read_exact(&mut buf).await?;

    assert_eq!(buf, FILE);
    Ok(())
}

#[tokio::test]
async fn pass() -> io::Result<()> {
    let (addr, domain, chain) = start_server();

    // TODO: not sure how to resolve this right now but since
    // TcpStream::bind now returns a future it creates a race
    // condition until its ready sometimes.
    use std::time::*;
    tokio::time::sleep(Duration::from_secs(1)).await;

    let mut config = ClientConfig::new();
    let mut chain = BufReader::new(Cursor::new(chain));
    config.root_store.add_pem_file(&mut chain).unwrap();
    let config = Arc::new(config);

    start_client(*addr, domain, config.clone()).await?;

    Ok(())
}

#[tokio::test]
async fn fail() -> io::Result<()> {
    let (addr, domain, chain) = start_server();

    let mut config = ClientConfig::new();
    let mut chain = BufReader::new(Cursor::new(chain));
    config.root_store.add_pem_file(&mut chain).unwrap();
    let config = Arc::new(config);

    assert_ne!(domain, &"google.com");
    let ret = start_client(*addr, "google.com", config).await;
    assert!(ret.is_err());

    Ok(())
}
32.207692
100
0.580368
eb4d57678e3d0c2345ebb882bbc39e8f2ffb1fd2
428
//! UDP Socket options and extra data

#[derive(Debug, Clone, Copy, Eq, PartialEq, Default)]
pub struct UdpSocketControlData {
    /// Session ID in client.
    ///
    /// For identifying a unique association in client
    pub client_session_id: u64,

    /// Session ID in server.
    ///
    /// For identifying a unique association in server
    pub server_session_id: u64,

    /// Packet counter
    pub packet_id: u64,
}
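// Minimal sketch of stamping outgoing packets with the struct above; the
// session-ID value is made up, and a real client would persist the control
// data across sends rather than rebuild it per packet.
fn new_client_control(client_session_id: u64) -> UdpSocketControlData {
    UdpSocketControlData {
        client_session_id,
        ..Default::default()
    }
}

fn stamp_packet(ctrl: &mut UdpSocketControlData) {
    ctrl.packet_id += 1; // monotonically increasing per-packet counter
}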
26.75
55
0.665888
ebbd951380c5e3fba6c985ba9ef1275ecbe25558
3,969
// Generated from definition io.k8s.api.apps.v1beta1.DeploymentStrategy

/// DeploymentStrategy describes how to replace existing pods with new ones.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct DeploymentStrategy {
    /// Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.
    pub rolling_update: Option<crate::v1_13::api::apps::v1beta1::RollingUpdateDeployment>,

    /// Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate.
    pub type_: Option<String>,
}

impl<'de> serde::Deserialize<'de> for DeploymentStrategy {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> {
        #[allow(non_camel_case_types)]
        enum Field {
            Key_rolling_update,
            Key_type_,
            Other,
        }

        impl<'de> serde::Deserialize<'de> for Field {
            fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> {
                struct Visitor;

                impl<'de> serde::de::Visitor<'de> for Visitor {
                    type Value = Field;

                    fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                        write!(f, "field identifier")
                    }

                    fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: serde::de::Error {
                        Ok(match v {
                            "rollingUpdate" => Field::Key_rolling_update,
                            "type" => Field::Key_type_,
                            _ => Field::Other,
                        })
                    }
                }

                deserializer.deserialize_identifier(Visitor)
            }
        }

        struct Visitor;

        impl<'de> serde::de::Visitor<'de> for Visitor {
            type Value = DeploymentStrategy;

            fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                write!(f, "struct DeploymentStrategy")
            }

            fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: serde::de::MapAccess<'de> {
                let mut value_rolling_update: Option<crate::v1_13::api::apps::v1beta1::RollingUpdateDeployment> = None;
                let mut value_type_: Option<String> = None;

                while let Some(key) = serde::de::MapAccess::next_key::<Field>(&mut map)? {
                    match key {
                        Field::Key_rolling_update => value_rolling_update = serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_type_ => value_type_ = serde::de::MapAccess::next_value(&mut map)?,
                        Field::Other => { let _: serde::de::IgnoredAny = serde::de::MapAccess::next_value(&mut map)?; },
                    }
                }

                Ok(DeploymentStrategy {
                    rolling_update: value_rolling_update,
                    type_: value_type_,
                })
            }
        }

        deserializer.deserialize_struct(
            "DeploymentStrategy",
            &[
                "rollingUpdate",
                "type",
            ],
            Visitor,
        )
    }
}

impl serde::Serialize for DeploymentStrategy {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer {
        let mut state = serializer.serialize_struct(
            "DeploymentStrategy",
            self.rolling_update.as_ref().map_or(0, |_| 1) +
            self.type_.as_ref().map_or(0, |_| 1),
        )?;
        if let Some(value) = &self.rolling_update {
            serde::ser::SerializeStruct::serialize_field(&mut state, "rollingUpdate", value)?;
        }
        if let Some(value) = &self.type_ {
            serde::ser::SerializeStruct::serialize_field(&mut state, "type", value)?;
        }
        serde::ser::SerializeStruct::end(state)
    }
}
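// Hedged round-trip sketch (assumes serde_json is available as a
// dev-dependency): per the manual Serialize impl above, absent fields are
// skipped entirely, so a strategy without rollingUpdate serializes to just
// {"type":"Recreate"} and deserializes back to an equal value.
#[test]
fn deployment_strategy_round_trip() {
    let strategy = DeploymentStrategy {
        rolling_update: None,
        type_: Some("Recreate".to_string()),
    };
    let json = serde_json::to_string(&strategy).unwrap();
    assert_eq!(json, r#"{"type":"Recreate"}"#);
    let back: DeploymentStrategy = serde_json::from_str(&json).unwrap();
    assert_eq!(back, strategy);
}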
39.29703
120
0.536911
e6cc580e1e3a535e7e9fc6a46a7d0fd5b32d6cb2
15,882
// -*- mode: rust; -*- // // This file is part of curve25519-dalek. // Copyright (c) 2016-2021 isis lovecruft // Copyright (c) 2016-2019 Henry de Valence // See LICENSE for licensing information. // // Authors: // - isis agora lovecruft <[email protected]> // - Henry de Valence <[email protected]> //! Module for common traits. #![allow(non_snake_case)] use core::borrow::Borrow; use subtle; use scalar::Scalar; // ------------------------------------------------------------------------ // Public Traits // ------------------------------------------------------------------------ /// Trait for getting the identity element of a point type. pub trait Identity { /// Returns the identity element of the curve. /// Can be used as a constructor. fn identity() -> Self; } /// Trait for testing if a curve point is equivalent to the identity point. pub trait IsIdentity { /// Return true if this element is the identity element of the curve. fn is_identity(&self) -> bool; } /// Implement generic identity equality testing for a point representations /// which have constant-time equality testing and a defined identity /// constructor. impl<T> IsIdentity for T where T: subtle::ConstantTimeEq + Identity, { fn is_identity(&self) -> bool { self.ct_eq(&T::identity()).unwrap_u8() == 1u8 } } /// A precomputed table of basepoints, for optimising scalar multiplications. pub trait BasepointTable { /// The type of point contained within this table. type Point; /// Generate a new precomputed basepoint table from the given basepoint. fn create(basepoint: &Self::Point) -> Self; /// Retrieve the original basepoint from this table. fn basepoint(&self) -> Self::Point; /// Multiply a `scalar` by this precomputed basepoint table, in constant time. fn basepoint_mul(&self, scalar: &Scalar) -> Self::Point; } /// A trait for constant-time multiscalar multiplication without precomputation. pub trait MultiscalarMul { /// The type of point being multiplied, e.g., `RistrettoPoint`. type Point; /// Given an iterator of (possibly secret) scalars and an iterator of /// public points, compute /// $$ /// Q = c\_1 P\_1 + \cdots + c\_n P\_n. /// $$ /// /// It is an error to call this function with two iterators of different lengths. /// /// # Examples /// /// The trait bound aims for maximum flexibility: the inputs must be /// convertable to iterators (`I: IntoIter`), and the iterator's items /// must be `Borrow<Scalar>` (or `Borrow<Point>`), to allow /// iterators returning either `Scalar`s or `&Scalar`s. 
/// /// ``` /// use curve25519_dalek::constants; /// use curve25519_dalek::traits::MultiscalarMul; /// use curve25519_dalek::ristretto::RistrettoPoint; /// use curve25519_dalek::scalar::Scalar; /// /// // Some scalars /// let a = Scalar::from(87329482u64); /// let b = Scalar::from(37264829u64); /// let c = Scalar::from(98098098u64); /// /// // Some points /// let P = constants::RISTRETTO_BASEPOINT_POINT; /// let Q = P + P; /// let R = P + Q; /// /// // A1 = a*P + b*Q + c*R /// let abc = [a,b,c]; /// let A1 = RistrettoPoint::multiscalar_mul(&abc, &[P,Q,R]); /// // Note: (&abc).into_iter(): Iterator<Item=&Scalar> /// /// // A2 = (-a)*P + (-b)*Q + (-c)*R /// let minus_abc = abc.iter().map(|x| -x); /// let A2 = RistrettoPoint::multiscalar_mul(minus_abc, &[P,Q,R]); /// // Note: minus_abc.into_iter(): Iterator<Item=Scalar> /// /// assert_eq!(A1.compress(), (-A2).compress()); /// ``` fn multiscalar_mul<I, J>(scalars: I, points: J) -> Self::Point where I: IntoIterator, I::Item: Borrow<Scalar>, J: IntoIterator, J::Item: Borrow<Self::Point>; } /// A trait for variable-time multiscalar multiplication without precomputation. pub trait VartimeMultiscalarMul { /// The type of point being multiplied, e.g., `RistrettoPoint`. type Point; /// Given an iterator of public scalars and an iterator of /// `Option`s of points, compute either `Some(Q)`, where /// $$ /// Q = c\_1 P\_1 + \cdots + c\_n P\_n, /// $$ /// if all points were `Some(P_i)`, or else return `None`. /// /// This function is particularly useful when verifying statements /// involving compressed points. Accepting `Option<Point>` allows /// inlining point decompression into the multiscalar call, /// avoiding the need for temporary buffers. /// ``` /// use curve25519_dalek::constants; /// use curve25519_dalek::traits::VartimeMultiscalarMul; /// use curve25519_dalek::ristretto::RistrettoPoint; /// use curve25519_dalek::scalar::Scalar; /// /// // Some scalars /// let a = Scalar::from(87329482u64); /// let b = Scalar::from(37264829u64); /// let c = Scalar::from(98098098u64); /// let abc = [a,b,c]; /// /// // Some points /// let P = constants::RISTRETTO_BASEPOINT_POINT; /// let Q = P + P; /// let R = P + Q; /// let PQR = [P, Q, R]; /// /// let compressed = [P.compress(), Q.compress(), R.compress()]; /// /// // Now we can compute A1 = a*P + b*Q + c*R using P, Q, R: /// let A1 = RistrettoPoint::vartime_multiscalar_mul(&abc, &PQR); /// /// // Or using the compressed points: /// let A2 = RistrettoPoint::optional_multiscalar_mul( /// &abc, /// compressed.iter().map(|pt| pt.decompress()), /// ); /// /// assert_eq!(A2, Some(A1)); /// /// // It's also possible to mix compressed and uncompressed points: /// let A3 = RistrettoPoint::optional_multiscalar_mul( /// abc.iter() /// .chain(abc.iter()), /// compressed.iter().map(|pt| pt.decompress()) /// .chain(PQR.iter().map(|&pt| Some(pt))), /// ); /// /// assert_eq!(A3, Some(A1+A1)); /// ``` fn optional_multiscalar_mul<I, J>(scalars: I, points: J) -> Option<Self::Point> where I: IntoIterator, I::Item: Borrow<Scalar>, J: IntoIterator<Item = Option<Self::Point>>; /// Given an iterator of public scalars and an iterator of /// public points, compute /// $$ /// Q = c\_1 P\_1 + \cdots + c\_n P\_n, /// $$ /// using variable-time operations. /// /// It is an error to call this function with two iterators of different lengths. 
/// /// # Examples /// /// The trait bound aims for maximum flexibility: the inputs must be /// convertable to iterators (`I: IntoIter`), and the iterator's items /// must be `Borrow<Scalar>` (or `Borrow<Point>`), to allow /// iterators returning either `Scalar`s or `&Scalar`s. /// /// ``` /// use curve25519_dalek::constants; /// use curve25519_dalek::traits::VartimeMultiscalarMul; /// use curve25519_dalek::ristretto::RistrettoPoint; /// use curve25519_dalek::scalar::Scalar; /// /// // Some scalars /// let a = Scalar::from(87329482u64); /// let b = Scalar::from(37264829u64); /// let c = Scalar::from(98098098u64); /// /// // Some points /// let P = constants::RISTRETTO_BASEPOINT_POINT; /// let Q = P + P; /// let R = P + Q; /// /// // A1 = a*P + b*Q + c*R /// let abc = [a,b,c]; /// let A1 = RistrettoPoint::vartime_multiscalar_mul(&abc, &[P,Q,R]); /// // Note: (&abc).into_iter(): Iterator<Item=&Scalar> /// /// // A2 = (-a)*P + (-b)*Q + (-c)*R /// let minus_abc = abc.iter().map(|x| -x); /// let A2 = RistrettoPoint::vartime_multiscalar_mul(minus_abc, &[P,Q,R]); /// // Note: minus_abc.into_iter(): Iterator<Item=Scalar> /// /// assert_eq!(A1.compress(), (-A2).compress()); /// ``` fn vartime_multiscalar_mul<I, J>(scalars: I, points: J) -> Self::Point where I: IntoIterator, I::Item: Borrow<Scalar>, J: IntoIterator, J::Item: Borrow<Self::Point>, Self::Point: Clone, { Self::optional_multiscalar_mul( scalars, points.into_iter().map(|P| Some(P.borrow().clone())), ) .unwrap() } } /// A trait for variable-time multiscalar multiplication with precomputation. /// /// A general multiscalar multiplication with precomputation can be written as /// $$ /// Q = a_1 A_1 + \cdots + a_n A_n + b_1 B_1 + \cdots + b_m B_m, /// $$ /// where the \\(B_i\\) are *static* points, for which precomputation /// is possible, and the \\(A_j\\) are *dynamic* points, for which /// precomputation is not possible. /// /// This trait has three methods for performing this computation: /// /// * [`vartime_multiscalar_mul`], which handles the special case /// where \\(n = 0\\) and there are no dynamic points; /// /// * [`vartime_mixed_multiscalar_mul`], which takes the dynamic /// points as already-validated `Point`s and is infallible; /// /// * [`optional_mixed_multiscalar_mul`], which takes the dynamic /// points as `Option<Point>`s and returns an `Option<Point>`, /// allowing decompression to be composed into the input iterators. /// /// All methods require that the lengths of the input iterators be /// known and matching, as if they were `ExactSizeIterator`s. (It /// does not require `ExactSizeIterator` only because that trait is /// broken). pub trait VartimePrecomputedMultiscalarMul: Sized { /// The type of point to be multiplied, e.g., `RistrettoPoint`. type Point: Clone; /// Given the static points \\( B_i \\), perform precomputation /// and return the precomputation data. fn new<I>(static_points: I) -> Self where I: IntoIterator, I::Item: Borrow<Self::Point>; /// Given `static_scalars`, an iterator of public scalars /// \\(b_i\\), compute /// $$ /// Q = b_1 B_1 + \cdots + b_m B_m, /// $$ /// where the \\(B_j\\) are the points that were supplied to `new`. /// /// It is an error to call this function with iterators of /// inconsistent lengths. /// /// The trait bound aims for maximum flexibility: the input must /// be convertable to iterators (`I: IntoIter`), and the /// iterator's items must be `Borrow<Scalar>`, to allow iterators /// returning either `Scalar`s or `&Scalar`s. 
fn vartime_multiscalar_mul<I>(&self, static_scalars: I) -> Self::Point where I: IntoIterator, I::Item: Borrow<Scalar>, { use core::iter; Self::vartime_mixed_multiscalar_mul( self, static_scalars, iter::empty::<Scalar>(), iter::empty::<Self::Point>(), ) } /// Given `static_scalars`, an iterator of public scalars /// \\(b_i\\), `dynamic_scalars`, an iterator of public scalars /// \\(a_i\\), and `dynamic_points`, an iterator of points /// \\(A_i\\), compute /// $$ /// Q = a_1 A_1 + \cdots + a_n A_n + b_1 B_1 + \cdots + b_m B_m, /// $$ /// where the \\(B_j\\) are the points that were supplied to `new`. /// /// It is an error to call this function with iterators of /// inconsistent lengths. /// /// The trait bound aims for maximum flexibility: the inputs must be /// convertable to iterators (`I: IntoIter`), and the iterator's items /// must be `Borrow<Scalar>` (or `Borrow<Point>`), to allow /// iterators returning either `Scalar`s or `&Scalar`s. fn vartime_mixed_multiscalar_mul<I, J, K>( &self, static_scalars: I, dynamic_scalars: J, dynamic_points: K, ) -> Self::Point where I: IntoIterator, I::Item: Borrow<Scalar>, J: IntoIterator, J::Item: Borrow<Scalar>, K: IntoIterator, K::Item: Borrow<Self::Point>, { Self::optional_mixed_multiscalar_mul( self, static_scalars, dynamic_scalars, dynamic_points.into_iter().map(|P| Some(P.borrow().clone())), ) .unwrap() } /// Given `static_scalars`, an iterator of public scalars /// \\(b_i\\), `dynamic_scalars`, an iterator of public scalars /// \\(a_i\\), and `dynamic_points`, an iterator of points /// \\(A_i\\), compute /// $$ /// Q = a_1 A_1 + \cdots + a_n A_n + b_1 B_1 + \cdots + b_m B_m, /// $$ /// where the \\(B_j\\) are the points that were supplied to `new`. /// /// If any of the dynamic points were `None`, return `None`. /// /// It is an error to call this function with iterators of /// inconsistent lengths. /// /// This function is particularly useful when verifying statements /// involving compressed points. Accepting `Option<Point>` allows /// inlining point decompression into the multiscalar call, /// avoiding the need for temporary buffers. fn optional_mixed_multiscalar_mul<I, J, K>( &self, static_scalars: I, dynamic_scalars: J, dynamic_points: K, ) -> Option<Self::Point> where I: IntoIterator, I::Item: Borrow<Scalar>, J: IntoIterator, J::Item: Borrow<Scalar>, K: IntoIterator<Item = Option<Self::Point>>; } /// A trait for variable-time multiscalar multiplication with precomputation /// which is efficient when the vector of scalars contains zeros. /// /// A multiscalar multiplication with precomputation can be written as /// $$ /// Q = B_1 + \cdots + b_m B_m, /// $$ /// where the \\(B_i\\) are *static* points, for which precomputation /// is possible. Unlike the VartimePrecomputedMultiscalarMul trait, this trait does not /// support *dynamic* points \\(A_j\\) for which precomputation is not possible. /// /// This trait has one method for performing this computation: ///\ /// * [`vartime_subset_multiscalar_mul`], which handles the case /// where some of the scalars are zero; /// /// The lengths of the scalars iterator needs to be less than or equal to the # of static points /// passed in `new`. pub trait VartimePrecomputedSubsetMultiscalarMul: Sized { /// The type of point to be multiplied, e.g., `RistrettoPoint`. type Point: Clone; /// Given the static points \\( B_i \\), perform precomputation /// and return the precomputation data. 
fn new<I>(static_points: I) -> Self where I: IntoIterator, I::Item: Borrow<Self::Point>; /// Given `static_scalars`, an iterator of public scalars /// \\(b_i\\), compute /// $$ /// Q = b_1 B_1 + \cdots + b_m B_m, /// $$ /// where the \\(B_j\\) are the points that were supplied to `new`. /// /// It is an error to call this function with a scalar iterator larger in size that the static /// points supplied to `new`. Of course, unlike in `VartimePrecomputedMultiscalarMul`, the iterator /// can be over fewer things than the number of these static points. /// /// The trait bound aims for maximum flexibility: the input must be convertable to iterators /// (`I: IntoIter`), and the iterator's items must be `(usize, S)` where `S: Borrow<Scalar>`, /// to allow iterators returning either `Scalar`s or `&Scalar`s. fn vartime_subset_multiscalar_mul<I, S>(&self, static_scalars: I) -> Self::Point where I: IntoIterator<Item = (usize, S)>, S: Borrow<Scalar>; } // ------------------------------------------------------------------------ // Private Traits // ------------------------------------------------------------------------ /// Trait for checking whether a point is on the curve. /// /// This trait is only for debugging/testing, since it should be /// impossible for a `curve25519-dalek` user to construct an invalid /// point. pub(crate) trait ValidityCheck { /// Checks whether the point is on the curve. Not CT. fn is_valid(&self) -> bool; }
35.77027
103
0.599043
e9b6b3a63f09f912c0f01fadb3f0b8d14b38b586
1,661
// The Nature of Code
// Daniel Shiffman
// http://natureofcode.com
//
// Example 1-2: Bouncing Ball, with Vector!
use nannou::prelude::*;

fn main() {
    nannou::app(model)
        .update(update)
        .simple_window(view)
        .size(300, 300)
        .run();
}

struct Model {
    ball: Ball,
}

struct Ball {
    position: Point2,
    velocity: Vec2,
}

impl Ball {
    fn new() -> Self {
        let position = pt2(100.0, 100.0);
        let velocity = vec2(2.5, 5.0);
        Ball { position, velocity }
    }

    fn update(&mut self, rect: Rect<f32>) {
        // Add the current speed to the position.
        self.position += self.velocity;

        if self.position.x > rect.right() || self.position.x < rect.left() {
            self.velocity.x = self.velocity.x * -1.0;
        }
        if self.position.y > rect.top() || self.position.y < rect.bottom() {
            self.velocity.y = self.velocity.y * -1.0;
        }
    }

    fn display(&self, draw: &Draw) {
        // Display circle at x position
        draw.ellipse()
            .xy(self.position)
            .w_h(16.0, 16.0)
            .gray(0.5)
            .stroke(BLACK);
    }
}

fn model(_app: &App) -> Model {
    let ball = Ball::new();
    Model { ball }
}

fn update(app: &App, m: &mut Model, _update: Update) {
    m.ball.update(app.window_rect());
}

fn view(app: &App, m: &Model, frame: Frame) {
    // Begin drawing
    let draw = app.draw();
    draw.rect()
        .wh(app.window_rect().wh())
        .rgba(1.0, 1.0, 1.0, 0.03);
    m.ball.display(&draw);

    // Write the result of our drawing to the window's frame.
    draw.to_frame(app, &frame).unwrap();
}
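// The bounce rule above, distilled into one dimension: a velocity component
// flips sign once its position coordinate crosses the matching wall (the
// position itself is not clamped, exactly as in Ball::update).
fn bounce_1d(pos: f32, vel: f32, min: f32, max: f32) -> f32 {
    if pos > max || pos < min {
        -vel
    } else {
        vel
    }
}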
22.146667
76
0.53823
2607c5dddc96d61c513a79cdfb7976efe9144dd3
2,655
use crate::prelude::*;
use crate::vk;
use crate::RawPtr;
use crate::{Device, Instance};
use std::ffi::CStr;
use std::mem;

#[derive(Clone)]
pub struct CreateRenderPass2 {
    handle: vk::Device,
    fp: vk::KhrCreateRenderpass2Fn,
}

impl CreateRenderPass2 {
    pub fn new(instance: &Instance, device: &Device) -> Self {
        let handle = device.handle();
        let fp = vk::KhrCreateRenderpass2Fn::load(|name| unsafe {
            mem::transmute(instance.get_device_proc_addr(handle, name.as_ptr()))
        });
        Self { handle, fp }
    }

    /// <https://www.khronos.org/registry/vulkan/specs/1.3-extensions/man/html/vkCreateRenderPass2.html>
    pub unsafe fn create_render_pass2(
        &self,
        create_info: &vk::RenderPassCreateInfo2,
        allocation_callbacks: Option<&vk::AllocationCallbacks>,
    ) -> VkResult<vk::RenderPass> {
        let mut renderpass = mem::zeroed();
        self.fp
            .create_render_pass2_khr(
                self.handle,
                create_info,
                allocation_callbacks.as_raw_ptr(),
                &mut renderpass,
            )
            .result_with_success(renderpass)
    }

    /// <https://www.khronos.org/registry/vulkan/specs/1.3-extensions/man/html/vkCmdBeginRenderPass2.html>
    pub unsafe fn cmd_begin_render_pass2(
        &self,
        command_buffer: vk::CommandBuffer,
        render_pass_begin_info: &vk::RenderPassBeginInfo,
        subpass_begin_info: &vk::SubpassBeginInfo,
    ) {
        self.fp.cmd_begin_render_pass2_khr(
            command_buffer,
            render_pass_begin_info,
            subpass_begin_info,
        );
    }

    /// <https://www.khronos.org/registry/vulkan/specs/1.3-extensions/man/html/vkCmdNextSubpass2.html>
    pub unsafe fn cmd_next_subpass2(
        &self,
        command_buffer: vk::CommandBuffer,
        subpass_begin_info: &vk::SubpassBeginInfo,
        subpass_end_info: &vk::SubpassEndInfo,
    ) {
        self.fp
            .cmd_next_subpass2_khr(command_buffer, subpass_begin_info, subpass_end_info);
    }

    /// <https://www.khronos.org/registry/vulkan/specs/1.3-extensions/man/html/vkCmdEndRenderPass2.html>
    pub unsafe fn cmd_end_render_pass2(
        &self,
        command_buffer: vk::CommandBuffer,
        subpass_end_info: &vk::SubpassEndInfo,
    ) {
        self.fp
            .cmd_end_render_pass2_khr(command_buffer, subpass_end_info);
    }

    pub fn name() -> &'static CStr {
        vk::KhrCreateRenderpass2Fn::name()
    }

    pub fn fp(&self) -> &vk::KhrCreateRenderpass2Fn {
        &self.fp
    }

    pub fn device(&self) -> vk::Device {
        self.handle
    }
}
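// Hedged setup sketch: the wrapper above follows ash's usual extension-loader
// pattern, so given an already-created Instance and Device (creation elided,
// and assumed valid) the loader is a single call; the returned value then
// exposes the unsafe create/cmd functions defined above.
fn load_create_renderpass2(instance: &Instance, device: &Device) -> CreateRenderPass2 {
    CreateRenderPass2::new(instance, device)
}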
30.517241
106
0.626365
162aa45134d5ee487ef9bdcb774a7170d920f03d
56,616
use solana_cli::{ cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig}, spend_utils::SpendAmount, test_utils::{check_ready, check_recent_balance}, }; use solana_cli_output::{parse_sign_only_reply_string, OutputFormat}; use solana_client::{ blockhash_query::{self, BlockhashQuery}, nonce_utils, rpc_client::RpcClient, }; use solana_core::test_validator::TestValidator; use solana_faucet::faucet::run_local_faucet; use solana_sdk::{ account_utils::StateMut, commitment_config::CommitmentConfig, nonce::State as NonceState, pubkey::Pubkey, signature::{keypair_from_seed, Keypair, Signer}, stake::{ self, instruction::LockupArgs, state::{Lockup, StakeAuthorize, StakeState}, }, }; #[test] fn test_stake_delegation_force() { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr)); let rpc_client = RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); let default_signer = Keypair::new(); let mut config = CliConfig::recent_for_tests(); config.json_rpc_url = test_validator.rpc_url(); config.signers = vec![&default_signer]; request_and_confirm_airdrop(&rpc_client, &config, &config.signers[0].pubkey(), 100_000) .unwrap(); // Create vote account let vote_keypair = Keypair::new(); config.signers = vec![&default_signer, &vote_keypair]; config.command = CliCommand::CreateVoteAccount { vote_account: 1, seed: None, identity_account: 0, authorized_voter: None, authorized_withdrawer: None, commission: 0, memo: None, }; process_command(&config).unwrap(); // Create stake account let stake_keypair = Keypair::new(); config.signers = vec![&default_signer, &stake_keypair]; config.command = CliCommand::CreateStakeAccount { stake_account: 1, seed: None, staker: None, withdrawer: None, lockup: Lockup::default(), amount: SpendAmount::Some(50_000), sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, memo: None, fee_payer: 0, from: 0, }; process_command(&config).unwrap(); // Delegate stake fails (vote account had never voted) config.signers = vec![&default_signer]; config.command = CliCommand::DelegateStake { stake_account_pubkey: stake_keypair.pubkey(), vote_account_pubkey: vote_keypair.pubkey(), stake_authority: 0, force: false, sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::default(), nonce_account: None, nonce_authority: 0, memo: None, fee_payer: 0, }; process_command(&config).unwrap_err(); // But if we force it, it works anyway! 
config.command = CliCommand::DelegateStake { stake_account_pubkey: stake_keypair.pubkey(), vote_account_pubkey: vote_keypair.pubkey(), stake_authority: 0, force: true, sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::default(), nonce_account: None, nonce_authority: 0, memo: None, fee_payer: 0, }; process_command(&config).unwrap(); } #[test] fn test_seed_stake_delegation_and_deactivation() { solana_logger::setup(); let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr)); let rpc_client = RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); let validator_keypair = keypair_from_seed(&[0u8; 32]).unwrap(); let mut config_validator = CliConfig::recent_for_tests(); config_validator.json_rpc_url = test_validator.rpc_url(); config_validator.signers = vec![&validator_keypair]; request_and_confirm_airdrop( &rpc_client, &config_validator, &config_validator.signers[0].pubkey(), 100_000, ) .unwrap(); check_recent_balance(100_000, &rpc_client, &config_validator.signers[0].pubkey()); let stake_address = Pubkey::create_with_seed( &config_validator.signers[0].pubkey(), "hi there", &stake::program::id(), ) .expect("bad seed"); // Create stake account with a seed, uses the validator config as the base, // which is nice ;) config_validator.command = CliCommand::CreateStakeAccount { stake_account: 0, seed: Some("hi there".to_string()), staker: None, withdrawer: None, lockup: Lockup::default(), amount: SpendAmount::Some(50_000), sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, memo: None, fee_payer: 0, from: 0, }; process_command(&config_validator).unwrap(); // Delegate stake config_validator.command = CliCommand::DelegateStake { stake_account_pubkey: stake_address, vote_account_pubkey: test_validator.vote_account_address(), stake_authority: 0, force: true, sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::default(), nonce_account: None, nonce_authority: 0, memo: None, fee_payer: 0, }; process_command(&config_validator).unwrap(); // Deactivate stake config_validator.command = CliCommand::DeactivateStake { stake_account_pubkey: stake_address, stake_authority: 0, sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::default(), nonce_account: None, nonce_authority: 0, memo: None, seed: None, fee_payer: 0, }; process_command(&config_validator).unwrap(); } #[test] fn test_stake_delegation_and_deactivation() { solana_logger::setup(); let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr)); let rpc_client = RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); let validator_keypair = Keypair::new(); let mut config_validator = CliConfig::recent_for_tests(); config_validator.json_rpc_url = test_validator.rpc_url(); config_validator.signers = vec![&validator_keypair]; let stake_keypair = keypair_from_seed(&[0u8; 32]).unwrap(); request_and_confirm_airdrop( &rpc_client, &config_validator, &config_validator.signers[0].pubkey(), 100_000, ) .unwrap(); check_recent_balance(100_000, &rpc_client, &config_validator.signers[0].pubkey()); // 
Create stake account config_validator.signers.push(&stake_keypair); config_validator.command = CliCommand::CreateStakeAccount { stake_account: 1, seed: None, staker: None, withdrawer: None, lockup: Lockup::default(), amount: SpendAmount::Some(50_000), sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, memo: None, fee_payer: 0, from: 0, }; process_command(&config_validator).unwrap(); // Delegate stake config_validator.signers.pop(); config_validator.command = CliCommand::DelegateStake { stake_account_pubkey: stake_keypair.pubkey(), vote_account_pubkey: test_validator.vote_account_address(), stake_authority: 0, force: true, sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::default(), nonce_account: None, nonce_authority: 0, memo: None, fee_payer: 0, }; process_command(&config_validator).unwrap(); // Deactivate stake config_validator.command = CliCommand::DeactivateStake { stake_account_pubkey: stake_keypair.pubkey(), stake_authority: 0, sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::default(), nonce_account: None, nonce_authority: 0, memo: None, seed: None, fee_payer: 0, }; process_command(&config_validator).unwrap(); } #[test] fn test_offline_stake_delegation_and_deactivation() { solana_logger::setup(); let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr)); let rpc_client = RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); let mut config_validator = CliConfig::recent_for_tests(); config_validator.json_rpc_url = test_validator.rpc_url(); let validator_keypair = Keypair::new(); config_validator.signers = vec![&validator_keypair]; let mut config_payer = CliConfig::recent_for_tests(); config_payer.json_rpc_url = test_validator.rpc_url(); let stake_keypair = keypair_from_seed(&[0u8; 32]).unwrap(); let mut config_offline = CliConfig::recent_for_tests(); config_offline.json_rpc_url = String::default(); config_offline.command = CliCommand::ClusterVersion; let offline_keypair = Keypair::new(); config_offline.signers = vec![&offline_keypair]; // Verify that we cannot reach the cluster process_command(&config_offline).unwrap_err(); request_and_confirm_airdrop( &rpc_client, &config_validator, &config_validator.signers[0].pubkey(), 100_000, ) .unwrap(); check_recent_balance(100_000, &rpc_client, &config_validator.signers[0].pubkey()); request_and_confirm_airdrop( &rpc_client, &config_offline, &config_offline.signers[0].pubkey(), 100_000, ) .unwrap(); check_recent_balance(100_000, &rpc_client, &config_offline.signers[0].pubkey()); // Create stake account config_validator.signers.push(&stake_keypair); config_validator.command = CliCommand::CreateStakeAccount { stake_account: 1, seed: None, staker: Some(config_offline.signers[0].pubkey()), withdrawer: None, lockup: Lockup::default(), amount: SpendAmount::Some(50_000), sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, memo: None, fee_payer: 0, from: 0, }; process_command(&config_validator).unwrap(); // Delegate stake offline let (blockhash, _) = rpc_client.get_recent_blockhash().unwrap(); config_offline.command = CliCommand::DelegateStake { stake_account_pubkey: 
stake_keypair.pubkey(), vote_account_pubkey: test_validator.vote_account_address(), stake_authority: 0, force: true, sign_only: true, dump_transaction_message: false, blockhash_query: BlockhashQuery::None(blockhash), nonce_account: None, nonce_authority: 0, memo: None, fee_payer: 0, }; config_offline.output_format = OutputFormat::JsonCompact; let sig_response = process_command(&config_offline).unwrap(); let sign_only = parse_sign_only_reply_string(&sig_response); assert!(sign_only.has_all_signers()); let offline_presigner = sign_only .presigner_of(&config_offline.signers[0].pubkey()) .unwrap(); config_payer.signers = vec![&offline_presigner]; config_payer.command = CliCommand::DelegateStake { stake_account_pubkey: stake_keypair.pubkey(), vote_account_pubkey: test_validator.vote_account_address(), stake_authority: 0, force: true, sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash), nonce_account: None, nonce_authority: 0, memo: None, fee_payer: 0, }; process_command(&config_payer).unwrap(); // Deactivate stake offline let (blockhash, _) = rpc_client.get_recent_blockhash().unwrap(); config_offline.command = CliCommand::DeactivateStake { stake_account_pubkey: stake_keypair.pubkey(), stake_authority: 0, sign_only: true, dump_transaction_message: false, blockhash_query: BlockhashQuery::None(blockhash), nonce_account: None, nonce_authority: 0, memo: None, seed: None, fee_payer: 0, }; let sig_response = process_command(&config_offline).unwrap(); let sign_only = parse_sign_only_reply_string(&sig_response); assert!(sign_only.has_all_signers()); let offline_presigner = sign_only .presigner_of(&config_offline.signers[0].pubkey()) .unwrap(); config_payer.signers = vec![&offline_presigner]; config_payer.command = CliCommand::DeactivateStake { stake_account_pubkey: stake_keypair.pubkey(), stake_authority: 0, sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash), nonce_account: None, nonce_authority: 0, memo: None, seed: None, fee_payer: 0, }; process_command(&config_payer).unwrap(); } #[test] fn test_nonced_stake_delegation_and_deactivation() { solana_logger::setup(); let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr)); let rpc_client = RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); let config_keypair = keypair_from_seed(&[0u8; 32]).unwrap(); let mut config = CliConfig::recent_for_tests(); config.signers = vec![&config_keypair]; config.json_rpc_url = test_validator.rpc_url(); let minimum_nonce_balance = rpc_client .get_minimum_balance_for_rent_exemption(NonceState::size()) .unwrap(); request_and_confirm_airdrop(&rpc_client, &config, &config.signers[0].pubkey(), 100_000) .unwrap(); // Create stake account let stake_keypair = Keypair::new(); config.signers.push(&stake_keypair); config.command = CliCommand::CreateStakeAccount { stake_account: 1, seed: None, staker: None, withdrawer: None, lockup: Lockup::default(), amount: SpendAmount::Some(50_000), sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, memo: None, fee_payer: 0, from: 0, }; process_command(&config).unwrap(); // Create nonce account let nonce_account = 
Keypair::new(); config.signers[1] = &nonce_account; config.command = CliCommand::CreateNonceAccount { nonce_account: 1, seed: None, nonce_authority: Some(config.signers[0].pubkey()), memo: None, amount: SpendAmount::Some(minimum_nonce_balance), }; process_command(&config).unwrap(); // Fetch nonce hash let nonce_hash = nonce_utils::get_account_with_commitment( &rpc_client, &nonce_account.pubkey(), CommitmentConfig::processed(), ) .and_then(|ref a| nonce_utils::data_from_account(a)) .unwrap() .blockhash; // Delegate stake config.signers = vec![&config_keypair]; config.command = CliCommand::DelegateStake { stake_account_pubkey: stake_keypair.pubkey(), vote_account_pubkey: test_validator.vote_account_address(), stake_authority: 0, force: true, sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::FeeCalculator( blockhash_query::Source::NonceAccount(nonce_account.pubkey()), nonce_hash, ), nonce_account: Some(nonce_account.pubkey()), nonce_authority: 0, memo: None, fee_payer: 0, }; process_command(&config).unwrap(); // Fetch nonce hash let nonce_hash = nonce_utils::get_account_with_commitment( &rpc_client, &nonce_account.pubkey(), CommitmentConfig::processed(), ) .and_then(|ref a| nonce_utils::data_from_account(a)) .unwrap() .blockhash; // Deactivate stake config.command = CliCommand::DeactivateStake { stake_account_pubkey: stake_keypair.pubkey(), stake_authority: 0, sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::FeeCalculator( blockhash_query::Source::NonceAccount(nonce_account.pubkey()), nonce_hash, ), nonce_account: Some(nonce_account.pubkey()), nonce_authority: 0, memo: None, seed: None, fee_payer: 0, }; process_command(&config).unwrap(); } #[test] fn test_stake_authorize() { solana_logger::setup(); let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr)); let rpc_client = RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); let default_signer = Keypair::new(); let mut config = CliConfig::recent_for_tests(); config.json_rpc_url = test_validator.rpc_url(); config.signers = vec![&default_signer]; request_and_confirm_airdrop(&rpc_client, &config, &config.signers[0].pubkey(), 100_000) .unwrap(); let offline_keypair = keypair_from_seed(&[0u8; 32]).unwrap(); let mut config_offline = CliConfig::recent_for_tests(); config_offline.signers = vec![&offline_keypair]; config_offline.json_rpc_url = String::default(); let offline_authority_pubkey = config_offline.signers[0].pubkey(); config_offline.command = CliCommand::ClusterVersion; // Verify that we cannot reach the cluster process_command(&config_offline).unwrap_err(); request_and_confirm_airdrop( &rpc_client, &config_offline, &config_offline.signers[0].pubkey(), 100_000, ) .unwrap(); // Create stake account, identity is authority let stake_keypair = Keypair::new(); let stake_account_pubkey = stake_keypair.pubkey(); config.signers.push(&stake_keypair); config.command = CliCommand::CreateStakeAccount { stake_account: 1, seed: None, staker: None, withdrawer: None, lockup: Lockup::default(), amount: SpendAmount::Some(50_000), sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, memo: None, fee_payer: 0, from: 0, }; process_command(&config).unwrap(); // Assign new online stake authority let 
online_authority = Keypair::new(); let online_authority_pubkey = online_authority.pubkey(); config.signers.pop(); config.command = CliCommand::StakeAuthorize { stake_account_pubkey, new_authorizations: vec![(StakeAuthorize::Staker, online_authority_pubkey, 0)], sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::default(), nonce_account: None, nonce_authority: 0, memo: None, fee_payer: 0, custodian: None, no_wait: false, }; process_command(&config).unwrap(); let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap(); let stake_state: StakeState = stake_account.state().unwrap(); let current_authority = match stake_state { StakeState::Initialized(meta) => meta.authorized.staker, _ => panic!("Unexpected stake state!"), }; assert_eq!(current_authority, online_authority_pubkey); // Assign new online stake and withdraw authorities let online_authority2 = Keypair::new(); let online_authority2_pubkey = online_authority2.pubkey(); let withdraw_authority = Keypair::new(); let withdraw_authority_pubkey = withdraw_authority.pubkey(); config.signers.push(&online_authority); config.command = CliCommand::StakeAuthorize { stake_account_pubkey, new_authorizations: vec![ (StakeAuthorize::Staker, online_authority2_pubkey, 1), (StakeAuthorize::Withdrawer, withdraw_authority_pubkey, 0), ], sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::default(), nonce_account: None, nonce_authority: 0, memo: None, fee_payer: 0, custodian: None, no_wait: false, }; process_command(&config).unwrap(); let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap(); let stake_state: StakeState = stake_account.state().unwrap(); let (current_staker, current_withdrawer) = match stake_state { StakeState::Initialized(meta) => (meta.authorized.staker, meta.authorized.withdrawer), _ => panic!("Unexpected stake state!"), }; assert_eq!(current_staker, online_authority2_pubkey); assert_eq!(current_withdrawer, withdraw_authority_pubkey); // Assign new offline stake authority config.signers.pop(); config.signers.push(&online_authority2); config.command = CliCommand::StakeAuthorize { stake_account_pubkey, new_authorizations: vec![(StakeAuthorize::Staker, offline_authority_pubkey, 1)], sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::default(), nonce_account: None, nonce_authority: 0, memo: None, fee_payer: 0, custodian: None, no_wait: false, }; process_command(&config).unwrap(); let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap(); let stake_state: StakeState = stake_account.state().unwrap(); let current_authority = match stake_state { StakeState::Initialized(meta) => meta.authorized.staker, _ => panic!("Unexpected stake state!"), }; assert_eq!(current_authority, offline_authority_pubkey); // Offline assignment of new nonced stake authority let nonced_authority = Keypair::new(); let nonced_authority_pubkey = nonced_authority.pubkey(); let (blockhash, _) = rpc_client.get_recent_blockhash().unwrap(); config_offline.command = CliCommand::StakeAuthorize { stake_account_pubkey, new_authorizations: vec![(StakeAuthorize::Staker, nonced_authority_pubkey, 0)], sign_only: true, dump_transaction_message: false, blockhash_query: BlockhashQuery::None(blockhash), nonce_account: None, nonce_authority: 0, memo: None, fee_payer: 0, custodian: None, no_wait: false, }; config_offline.output_format = OutputFormat::JsonCompact; let sign_reply = process_command(&config_offline).unwrap(); let sign_only = 
parse_sign_only_reply_string(&sign_reply); assert!(sign_only.has_all_signers()); let offline_presigner = sign_only.presigner_of(&offline_authority_pubkey).unwrap(); config.signers = vec![&offline_presigner]; config.command = CliCommand::StakeAuthorize { stake_account_pubkey, new_authorizations: vec![(StakeAuthorize::Staker, nonced_authority_pubkey, 0)], sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash), nonce_account: None, nonce_authority: 0, memo: None, fee_payer: 0, custodian: None, no_wait: false, }; process_command(&config).unwrap(); let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap(); let stake_state: StakeState = stake_account.state().unwrap(); let current_authority = match stake_state { StakeState::Initialized(meta) => meta.authorized.staker, _ => panic!("Unexpected stake state!"), }; assert_eq!(current_authority, nonced_authority_pubkey); // Create nonce account let minimum_nonce_balance = rpc_client .get_minimum_balance_for_rent_exemption(NonceState::size()) .unwrap(); let nonce_account = Keypair::new(); config.signers = vec![&default_signer, &nonce_account]; config.command = CliCommand::CreateNonceAccount { nonce_account: 1, seed: None, nonce_authority: Some(offline_authority_pubkey), memo: None, amount: SpendAmount::Some(minimum_nonce_balance), }; process_command(&config).unwrap(); // Fetch nonce hash let nonce_hash = nonce_utils::get_account_with_commitment( &rpc_client, &nonce_account.pubkey(), CommitmentConfig::processed(), ) .and_then(|ref a| nonce_utils::data_from_account(a)) .unwrap() .blockhash; // Nonced assignment of new online stake authority let online_authority = Keypair::new(); let online_authority_pubkey = online_authority.pubkey(); config_offline.signers.push(&nonced_authority); config_offline.command = CliCommand::StakeAuthorize { stake_account_pubkey, new_authorizations: vec![(StakeAuthorize::Staker, online_authority_pubkey, 1)], sign_only: true, dump_transaction_message: false, blockhash_query: BlockhashQuery::None(nonce_hash), nonce_account: Some(nonce_account.pubkey()), nonce_authority: 0, memo: None, fee_payer: 0, custodian: None, no_wait: false, }; let sign_reply = process_command(&config_offline).unwrap(); let sign_only = parse_sign_only_reply_string(&sign_reply); assert!(sign_only.has_all_signers()); assert_eq!(sign_only.blockhash, nonce_hash); let offline_presigner = sign_only.presigner_of(&offline_authority_pubkey).unwrap(); let nonced_authority_presigner = sign_only.presigner_of(&nonced_authority_pubkey).unwrap(); config.signers = vec![&offline_presigner, &nonced_authority_presigner]; config.command = CliCommand::StakeAuthorize { stake_account_pubkey, new_authorizations: vec![(StakeAuthorize::Staker, online_authority_pubkey, 1)], sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::FeeCalculator( blockhash_query::Source::NonceAccount(nonce_account.pubkey()), sign_only.blockhash, ), nonce_account: Some(nonce_account.pubkey()), nonce_authority: 0, memo: None, fee_payer: 0, custodian: None, no_wait: false, }; process_command(&config).unwrap(); let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap(); let stake_state: StakeState = stake_account.state().unwrap(); let current_authority = match stake_state { StakeState::Initialized(meta) => meta.authorized.staker, _ => panic!("Unexpected stake state!"), }; assert_eq!(current_authority, online_authority_pubkey); let new_nonce_hash = 
nonce_utils::get_account_with_commitment( &rpc_client, &nonce_account.pubkey(), CommitmentConfig::processed(), ) .and_then(|ref a| nonce_utils::data_from_account(a)) .unwrap() .blockhash; assert_ne!(nonce_hash, new_nonce_hash); } #[test] fn test_stake_authorize_with_fee_payer() { solana_logger::setup(); const SIG_FEE: u64 = 42; let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); let test_validator = TestValidator::with_custom_fees(mint_pubkey, SIG_FEE, Some(faucet_addr)); let rpc_client = RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); let default_signer = Keypair::new(); let default_pubkey = default_signer.pubkey(); let mut config = CliConfig::recent_for_tests(); config.json_rpc_url = test_validator.rpc_url(); config.signers = vec![&default_signer]; let payer_keypair = keypair_from_seed(&[0u8; 32]).unwrap(); let mut config_payer = CliConfig::recent_for_tests(); config_payer.signers = vec![&payer_keypair]; config_payer.json_rpc_url = test_validator.rpc_url(); let payer_pubkey = config_payer.signers[0].pubkey(); let mut config_offline = CliConfig::recent_for_tests(); let offline_signer = Keypair::new(); config_offline.signers = vec![&offline_signer]; config_offline.json_rpc_url = String::new(); let offline_pubkey = config_offline.signers[0].pubkey(); // Verify we're offline config_offline.command = CliCommand::ClusterVersion; process_command(&config_offline).unwrap_err(); request_and_confirm_airdrop(&rpc_client, &config, &default_pubkey, 100_000).unwrap(); check_recent_balance(100_000, &rpc_client, &config.signers[0].pubkey()); request_and_confirm_airdrop(&rpc_client, &config_payer, &payer_pubkey, 100_000).unwrap(); check_recent_balance(100_000, &rpc_client, &payer_pubkey); request_and_confirm_airdrop(&rpc_client, &config_offline, &offline_pubkey, 100_000).unwrap(); check_recent_balance(100_000, &rpc_client, &offline_pubkey); check_ready(&rpc_client); // Create stake account, identity is authority let stake_keypair = Keypair::new(); let stake_account_pubkey = stake_keypair.pubkey(); config.signers.push(&stake_keypair); config.command = CliCommand::CreateStakeAccount { stake_account: 1, seed: None, staker: None, withdrawer: None, lockup: Lockup::default(), amount: SpendAmount::Some(50_000), sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, memo: None, fee_payer: 0, from: 0, }; process_command(&config).unwrap(); // `config` balance should be 50,000 - 1 stake account sig - 1 fee sig check_recent_balance(50_000 - SIG_FEE - SIG_FEE, &rpc_client, &default_pubkey); // Assign authority with separate fee payer config.signers = vec![&default_signer, &payer_keypair]; config.command = CliCommand::StakeAuthorize { stake_account_pubkey, new_authorizations: vec![(StakeAuthorize::Staker, offline_pubkey, 0)], sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, memo: None, fee_payer: 1, custodian: None, no_wait: false, }; process_command(&config).unwrap(); // `config` balance has not changed, despite submitting the TX check_recent_balance(50_000 - SIG_FEE - SIG_FEE, &rpc_client, &default_pubkey); // `config_payer` however has paid `config`'s authority sig // and `config_payer`'s fee sig check_recent_balance(100_000 - SIG_FEE - SIG_FEE, &rpc_client, &payer_pubkey); // 
Assign authority with offline fee payer let (blockhash, _) = rpc_client.get_recent_blockhash().unwrap(); config_offline.command = CliCommand::StakeAuthorize { stake_account_pubkey, new_authorizations: vec![(StakeAuthorize::Staker, payer_pubkey, 0)], sign_only: true, dump_transaction_message: false, blockhash_query: BlockhashQuery::None(blockhash), nonce_account: None, nonce_authority: 0, memo: None, fee_payer: 0, custodian: None, no_wait: false, }; config_offline.output_format = OutputFormat::JsonCompact; let sign_reply = process_command(&config_offline).unwrap(); let sign_only = parse_sign_only_reply_string(&sign_reply); assert!(sign_only.has_all_signers()); let offline_presigner = sign_only.presigner_of(&offline_pubkey).unwrap(); config.signers = vec![&offline_presigner]; config.command = CliCommand::StakeAuthorize { stake_account_pubkey, new_authorizations: vec![(StakeAuthorize::Staker, payer_pubkey, 0)], sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash), nonce_account: None, nonce_authority: 0, memo: None, fee_payer: 0, custodian: None, no_wait: false, }; process_command(&config).unwrap(); // `config`'s balance again has not changed check_recent_balance(50_000 - SIG_FEE - SIG_FEE, &rpc_client, &default_pubkey); // `config_offline` however has paid 1 sig due to being both authority // and fee payer check_recent_balance(100_000 - SIG_FEE, &rpc_client, &offline_pubkey); } #[test] fn test_stake_split() { solana_logger::setup(); let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); let test_validator = TestValidator::with_custom_fees(mint_pubkey, 1, Some(faucet_addr)); let rpc_client = RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); let default_signer = Keypair::new(); let offline_signer = Keypair::new(); let mut config = CliConfig::recent_for_tests(); config.json_rpc_url = test_validator.rpc_url(); config.signers = vec![&default_signer]; let mut config_offline = CliConfig::recent_for_tests(); config_offline.json_rpc_url = String::default(); config_offline.signers = vec![&offline_signer]; let offline_pubkey = config_offline.signers[0].pubkey(); // Verify we're offline config_offline.command = CliCommand::ClusterVersion; process_command(&config_offline).unwrap_err(); request_and_confirm_airdrop(&rpc_client, &config, &config.signers[0].pubkey(), 500_000) .unwrap(); check_recent_balance(500_000, &rpc_client, &config.signers[0].pubkey()); request_and_confirm_airdrop(&rpc_client, &config_offline, &offline_pubkey, 100_000).unwrap(); check_recent_balance(100_000, &rpc_client, &offline_pubkey); // Create stake account, identity is authority let minimum_stake_balance = rpc_client .get_minimum_balance_for_rent_exemption(std::mem::size_of::<StakeState>()) .unwrap(); let stake_keypair = keypair_from_seed(&[0u8; 32]).unwrap(); let stake_account_pubkey = stake_keypair.pubkey(); config.signers.push(&stake_keypair); config.command = CliCommand::CreateStakeAccount { stake_account: 1, seed: None, staker: Some(offline_pubkey), withdrawer: Some(offline_pubkey), lockup: Lockup::default(), amount: SpendAmount::Some(10 * minimum_stake_balance), sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, memo: None, fee_payer: 0, from: 0, }; process_command(&config).unwrap(); check_recent_balance( 10 * 
minimum_stake_balance, &rpc_client, &stake_account_pubkey, ); // Create nonce account let minimum_nonce_balance = rpc_client .get_minimum_balance_for_rent_exemption(NonceState::size()) .unwrap(); let nonce_account = keypair_from_seed(&[1u8; 32]).unwrap(); config.signers = vec![&default_signer, &nonce_account]; config.command = CliCommand::CreateNonceAccount { nonce_account: 1, seed: None, nonce_authority: Some(offline_pubkey), memo: None, amount: SpendAmount::Some(minimum_nonce_balance), }; process_command(&config).unwrap(); check_recent_balance(minimum_nonce_balance, &rpc_client, &nonce_account.pubkey()); // Fetch nonce hash let nonce_hash = nonce_utils::get_account_with_commitment( &rpc_client, &nonce_account.pubkey(), CommitmentConfig::processed(), ) .and_then(|ref a| nonce_utils::data_from_account(a)) .unwrap() .blockhash; // Nonced offline split let split_account = keypair_from_seed(&[2u8; 32]).unwrap(); check_recent_balance(0, &rpc_client, &split_account.pubkey()); config_offline.signers.push(&split_account); config_offline.command = CliCommand::SplitStake { stake_account_pubkey, stake_authority: 0, sign_only: true, dump_transaction_message: false, blockhash_query: BlockhashQuery::None(nonce_hash), nonce_account: Some(nonce_account.pubkey()), nonce_authority: 0, memo: None, split_stake_account: 1, seed: None, lamports: 2 * minimum_stake_balance, fee_payer: 0, }; config_offline.output_format = OutputFormat::JsonCompact; let sig_response = process_command(&config_offline).unwrap(); let sign_only = parse_sign_only_reply_string(&sig_response); assert!(sign_only.has_all_signers()); let offline_presigner = sign_only.presigner_of(&offline_pubkey).unwrap(); config.signers = vec![&offline_presigner, &split_account]; config.command = CliCommand::SplitStake { stake_account_pubkey, stake_authority: 0, sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::FeeCalculator( blockhash_query::Source::NonceAccount(nonce_account.pubkey()), sign_only.blockhash, ), nonce_account: Some(nonce_account.pubkey()), nonce_authority: 0, memo: None, split_stake_account: 1, seed: None, lamports: 2 * minimum_stake_balance, fee_payer: 0, }; process_command(&config).unwrap(); check_recent_balance( 8 * minimum_stake_balance, &rpc_client, &stake_account_pubkey, ); check_recent_balance( 2 * minimum_stake_balance, &rpc_client, &split_account.pubkey(), ); } #[test] fn test_stake_set_lockup() { solana_logger::setup(); let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); let test_validator = TestValidator::with_custom_fees(mint_pubkey, 1, Some(faucet_addr)); let rpc_client = RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); let default_signer = Keypair::new(); let offline_signer = Keypair::new(); let mut config = CliConfig::recent_for_tests(); config.json_rpc_url = test_validator.rpc_url(); config.signers = vec![&default_signer]; let mut config_offline = CliConfig::recent_for_tests(); config_offline.json_rpc_url = String::default(); config_offline.signers = vec![&offline_signer]; let offline_pubkey = config_offline.signers[0].pubkey(); // Verify we're offline config_offline.command = CliCommand::ClusterVersion; process_command(&config_offline).unwrap_err(); request_and_confirm_airdrop(&rpc_client, &config, &config.signers[0].pubkey(), 500_000) .unwrap(); check_recent_balance(500_000, &rpc_client, &config.signers[0].pubkey()); request_and_confirm_airdrop(&rpc_client, 
&config_offline, &offline_pubkey, 100_000).unwrap(); check_recent_balance(100_000, &rpc_client, &offline_pubkey); // Create stake account, identity is authority let minimum_stake_balance = rpc_client .get_minimum_balance_for_rent_exemption(std::mem::size_of::<StakeState>()) .unwrap(); let stake_keypair = keypair_from_seed(&[0u8; 32]).unwrap(); let stake_account_pubkey = stake_keypair.pubkey(); let lockup = Lockup { custodian: config.signers[0].pubkey(), ..Lockup::default() }; config.signers.push(&stake_keypair); config.command = CliCommand::CreateStakeAccount { stake_account: 1, seed: None, staker: Some(offline_pubkey), withdrawer: Some(config.signers[0].pubkey()), lockup, amount: SpendAmount::Some(10 * minimum_stake_balance), sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, memo: None, fee_payer: 0, from: 0, }; process_command(&config).unwrap(); check_recent_balance( 10 * minimum_stake_balance, &rpc_client, &stake_account_pubkey, ); // Online set lockup let lockup = LockupArgs { unix_timestamp: Some(1_581_534_570), epoch: Some(200), custodian: None, }; config.signers.pop(); config.command = CliCommand::StakeSetLockup { stake_account_pubkey, lockup, custodian: 0, sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::default(), nonce_account: None, nonce_authority: 0, memo: None, fee_payer: 0, }; process_command(&config).unwrap(); let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap(); let stake_state: StakeState = stake_account.state().unwrap(); let current_lockup = match stake_state { StakeState::Initialized(meta) => meta.lockup, _ => panic!("Unexpected stake state!"), }; assert_eq!( current_lockup.unix_timestamp, lockup.unix_timestamp.unwrap() ); assert_eq!(current_lockup.epoch, lockup.epoch.unwrap()); assert_eq!(current_lockup.custodian, config.signers[0].pubkey()); // Set custodian to another pubkey let online_custodian = Keypair::new(); let online_custodian_pubkey = online_custodian.pubkey(); let lockup = LockupArgs { unix_timestamp: Some(1_581_534_571), epoch: Some(201), custodian: Some(online_custodian_pubkey), }; config.command = CliCommand::StakeSetLockup { stake_account_pubkey, lockup, custodian: 0, sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::default(), nonce_account: None, nonce_authority: 0, memo: None, fee_payer: 0, }; process_command(&config).unwrap(); let lockup = LockupArgs { unix_timestamp: Some(1_581_534_572), epoch: Some(202), custodian: None, }; config.signers = vec![&default_signer, &online_custodian]; config.command = CliCommand::StakeSetLockup { stake_account_pubkey, lockup, custodian: 1, sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::default(), nonce_account: None, nonce_authority: 0, memo: None, fee_payer: 0, }; process_command(&config).unwrap(); let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap(); let stake_state: StakeState = stake_account.state().unwrap(); let current_lockup = match stake_state { StakeState::Initialized(meta) => meta.lockup, _ => panic!("Unexpected stake state!"), }; assert_eq!( current_lockup.unix_timestamp, lockup.unix_timestamp.unwrap() ); assert_eq!(current_lockup.epoch, lockup.epoch.unwrap()); assert_eq!(current_lockup.custodian, online_custodian_pubkey); // Set custodian to offline pubkey let lockup = LockupArgs { unix_timestamp: Some(1_581_534_573), epoch: Some(203), custodian: 
Some(offline_pubkey), }; config.command = CliCommand::StakeSetLockup { stake_account_pubkey, lockup, custodian: 1, sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::default(), nonce_account: None, nonce_authority: 0, memo: None, fee_payer: 0, }; process_command(&config).unwrap(); // Create nonce account let minimum_nonce_balance = rpc_client .get_minimum_balance_for_rent_exemption(NonceState::size()) .unwrap(); let nonce_account = keypair_from_seed(&[1u8; 32]).unwrap(); let nonce_account_pubkey = nonce_account.pubkey(); config.signers = vec![&default_signer, &nonce_account]; config.command = CliCommand::CreateNonceAccount { nonce_account: 1, seed: None, nonce_authority: Some(offline_pubkey), memo: None, amount: SpendAmount::Some(minimum_nonce_balance), }; process_command(&config).unwrap(); check_recent_balance(minimum_nonce_balance, &rpc_client, &nonce_account_pubkey); // Fetch nonce hash let nonce_hash = nonce_utils::get_account_with_commitment( &rpc_client, &nonce_account.pubkey(), CommitmentConfig::processed(), ) .and_then(|ref a| nonce_utils::data_from_account(a)) .unwrap() .blockhash; // Nonced offline set lockup let lockup = LockupArgs { unix_timestamp: Some(1_581_534_576), epoch: Some(222), custodian: None, }; config_offline.command = CliCommand::StakeSetLockup { stake_account_pubkey, lockup, custodian: 0, sign_only: true, dump_transaction_message: false, blockhash_query: BlockhashQuery::None(nonce_hash), nonce_account: Some(nonce_account_pubkey), nonce_authority: 0, memo: None, fee_payer: 0, }; config_offline.output_format = OutputFormat::JsonCompact; let sig_response = process_command(&config_offline).unwrap(); let sign_only = parse_sign_only_reply_string(&sig_response); assert!(sign_only.has_all_signers()); let offline_presigner = sign_only.presigner_of(&offline_pubkey).unwrap(); config.signers = vec![&offline_presigner]; config.command = CliCommand::StakeSetLockup { stake_account_pubkey, lockup, custodian: 0, sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::FeeCalculator( blockhash_query::Source::NonceAccount(nonce_account_pubkey), sign_only.blockhash, ), nonce_account: Some(nonce_account_pubkey), nonce_authority: 0, memo: None, fee_payer: 0, }; process_command(&config).unwrap(); let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap(); let stake_state: StakeState = stake_account.state().unwrap(); let current_lockup = match stake_state { StakeState::Initialized(meta) => meta.lockup, _ => panic!("Unexpected stake state!"), }; assert_eq!( current_lockup.unix_timestamp, lockup.unix_timestamp.unwrap() ); assert_eq!(current_lockup.epoch, lockup.epoch.unwrap()); assert_eq!(current_lockup.custodian, offline_pubkey); } #[test] fn test_offline_nonced_create_stake_account_and_withdraw() { solana_logger::setup(); let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr)); let rpc_client = RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); let mut config = CliConfig::recent_for_tests(); let default_signer = keypair_from_seed(&[1u8; 32]).unwrap(); config.signers = vec![&default_signer]; config.json_rpc_url = test_validator.rpc_url(); let mut config_offline = CliConfig::recent_for_tests(); let offline_signer = keypair_from_seed(&[2u8; 32]).unwrap(); config_offline.signers = vec![&offline_signer]; let offline_pubkey = 
config_offline.signers[0].pubkey(); config_offline.json_rpc_url = String::default(); config_offline.command = CliCommand::ClusterVersion; // Verify that we cannot reach the cluster process_command(&config_offline).unwrap_err(); request_and_confirm_airdrop(&rpc_client, &config, &config.signers[0].pubkey(), 200_000) .unwrap(); check_recent_balance(200_000, &rpc_client, &config.signers[0].pubkey()); request_and_confirm_airdrop(&rpc_client, &config_offline, &offline_pubkey, 100_000).unwrap(); check_recent_balance(100_000, &rpc_client, &offline_pubkey); // Create nonce account let minimum_nonce_balance = rpc_client .get_minimum_balance_for_rent_exemption(NonceState::size()) .unwrap(); let nonce_account = keypair_from_seed(&[3u8; 32]).unwrap(); let nonce_pubkey = nonce_account.pubkey(); config.signers.push(&nonce_account); config.command = CliCommand::CreateNonceAccount { nonce_account: 1, seed: None, nonce_authority: Some(offline_pubkey), memo: None, amount: SpendAmount::Some(minimum_nonce_balance), }; process_command(&config).unwrap(); // Fetch nonce hash let nonce_hash = nonce_utils::get_account_with_commitment( &rpc_client, &nonce_account.pubkey(), CommitmentConfig::processed(), ) .and_then(|ref a| nonce_utils::data_from_account(a)) .unwrap() .blockhash; // Create stake account offline let stake_keypair = keypair_from_seed(&[4u8; 32]).unwrap(); let stake_pubkey = stake_keypair.pubkey(); config_offline.signers.push(&stake_keypair); config_offline.command = CliCommand::CreateStakeAccount { stake_account: 1, seed: None, staker: None, withdrawer: None, lockup: Lockup::default(), amount: SpendAmount::Some(50_000), sign_only: true, dump_transaction_message: false, blockhash_query: BlockhashQuery::None(nonce_hash), nonce_account: Some(nonce_pubkey), nonce_authority: 0, memo: None, fee_payer: 0, from: 0, }; config_offline.output_format = OutputFormat::JsonCompact; let sig_response = process_command(&config_offline).unwrap(); let sign_only = parse_sign_only_reply_string(&sig_response); assert!(sign_only.has_all_signers()); let offline_presigner = sign_only.presigner_of(&offline_pubkey).unwrap(); let stake_presigner = sign_only.presigner_of(&stake_pubkey).unwrap(); config.signers = vec![&offline_presigner, &stake_presigner]; config.command = CliCommand::CreateStakeAccount { stake_account: 1, seed: None, staker: Some(offline_pubkey), withdrawer: None, lockup: Lockup::default(), amount: SpendAmount::Some(50_000), sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::FeeCalculator( blockhash_query::Source::NonceAccount(nonce_pubkey), sign_only.blockhash, ), nonce_account: Some(nonce_pubkey), nonce_authority: 0, memo: None, fee_payer: 0, from: 0, }; process_command(&config).unwrap(); check_recent_balance(50_000, &rpc_client, &stake_pubkey); // Fetch nonce hash let nonce_hash = nonce_utils::get_account_with_commitment( &rpc_client, &nonce_account.pubkey(), CommitmentConfig::processed(), ) .and_then(|ref a| nonce_utils::data_from_account(a)) .unwrap() .blockhash; // Offline, nonced stake-withdraw let recipient = keypair_from_seed(&[5u8; 32]).unwrap(); let recipient_pubkey = recipient.pubkey(); config_offline.signers.pop(); config_offline.command = CliCommand::WithdrawStake { stake_account_pubkey: stake_pubkey, destination_account_pubkey: recipient_pubkey, amount: SpendAmount::Some(42), withdraw_authority: 0, custodian: None, sign_only: true, dump_transaction_message: false, blockhash_query: BlockhashQuery::None(nonce_hash), nonce_account: Some(nonce_pubkey), nonce_authority: 0, 
memo: None, seed: None, fee_payer: 0, }; let sig_response = process_command(&config_offline).unwrap(); let sign_only = parse_sign_only_reply_string(&sig_response); let offline_presigner = sign_only.presigner_of(&offline_pubkey).unwrap(); config.signers = vec![&offline_presigner]; config.command = CliCommand::WithdrawStake { stake_account_pubkey: stake_pubkey, destination_account_pubkey: recipient_pubkey, amount: SpendAmount::Some(42), withdraw_authority: 0, custodian: None, sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::FeeCalculator( blockhash_query::Source::NonceAccount(nonce_pubkey), sign_only.blockhash, ), nonce_account: Some(nonce_pubkey), nonce_authority: 0, memo: None, seed: None, fee_payer: 0, }; process_command(&config).unwrap(); check_recent_balance(42, &rpc_client, &recipient_pubkey); // Fetch nonce hash let nonce_hash = nonce_utils::get_account_with_commitment( &rpc_client, &nonce_account.pubkey(), CommitmentConfig::processed(), ) .and_then(|ref a| nonce_utils::data_from_account(a)) .unwrap() .blockhash; // Create another stake account. This time with seed let seed = "seedy"; config_offline.signers = vec![&offline_signer, &stake_keypair]; config_offline.command = CliCommand::CreateStakeAccount { stake_account: 1, seed: Some(seed.to_string()), staker: None, withdrawer: None, lockup: Lockup::default(), amount: SpendAmount::Some(50_000), sign_only: true, dump_transaction_message: false, blockhash_query: BlockhashQuery::None(nonce_hash), nonce_account: Some(nonce_pubkey), nonce_authority: 0, memo: None, fee_payer: 0, from: 0, }; let sig_response = process_command(&config_offline).unwrap(); let sign_only = parse_sign_only_reply_string(&sig_response); let offline_presigner = sign_only.presigner_of(&offline_pubkey).unwrap(); let stake_presigner = sign_only.presigner_of(&stake_pubkey).unwrap(); config.signers = vec![&offline_presigner, &stake_presigner]; config.command = CliCommand::CreateStakeAccount { stake_account: 1, seed: Some(seed.to_string()), staker: Some(offline_pubkey), withdrawer: Some(offline_pubkey), lockup: Lockup::default(), amount: SpendAmount::Some(50_000), sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::FeeCalculator( blockhash_query::Source::NonceAccount(nonce_pubkey), sign_only.blockhash, ), nonce_account: Some(nonce_pubkey), nonce_authority: 0, memo: None, fee_payer: 0, from: 0, }; process_command(&config).unwrap(); let seed_address = Pubkey::create_with_seed(&stake_pubkey, seed, &stake::program::id()).unwrap(); check_recent_balance(50_000, &rpc_client, &seed_address); }
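// Editor's note: every offline flow in the tests above follows one pattern:
// run the command with `sign_only: true` against a pinned blockhash, parse the
// signatures out of the JSON reply, wrap each one in a presigner, and resubmit
// the identical command online. Below is a minimal, std-only sketch of that
// round trip. `SignOnlyReply` and its methods are simplified stand-ins for the
// solana-cli/solana-sdk types (the real `has_all_signers`/`presigner_of` are
// called on the parsed sign-only reply above), and the pubkey/signature
// strings are placeholders, not real keys.
use std::collections::HashMap;

struct SignOnlyReply {
    blockhash: String,
    required: Vec<String>,               // pubkeys that must sign
    signatures: HashMap<String, String>, // pubkey -> signature
}

impl SignOnlyReply {
    // True once every required signer has contributed a signature.
    fn has_all_signers(&self) -> bool {
        self.required.iter().all(|pk| self.signatures.contains_key(pk))
    }

    // Look up one signer's signature so it can be attached as a presigner.
    fn presigner_of(&self, pubkey: &str) -> Option<(String, String)> {
        self.signatures
            .get(pubkey)
            .map(|sig| (pubkey.to_string(), sig.clone()))
    }
}

fn main() {
    // Offline half: sign the message against a pinned blockhash.
    let mut signatures = HashMap::new();
    signatures.insert("offline_pubkey".to_string(), "signature_1".to_string());
    let reply = SignOnlyReply {
        blockhash: "pinned_blockhash".to_string(),
        required: vec!["offline_pubkey".to_string()],
        signatures,
    };

    // Online half: verify completeness, then resubmit the same message with
    // the presigner against the same blockhash (or the nonce hash it names).
    assert!(reply.has_all_signers());
    let (signer, sig) = reply.presigner_of("offline_pubkey").unwrap();
    println!("submit: signer={} sig={} blockhash={}", signer, sig, reply.blockhash);
}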
36.199488
100
0.672177
ef15343d698d6718c8579c05912f368a6d495926
585
// modules2.rs
// Make me compile! Execute `rustlings hint modules2` for hints :)

mod delicious_snacks {
    pub use self::fruits::PEAR as fruit;
    pub use self::veggies::CARROT as veggie;

    mod fruits {
        pub const PEAR: &'static str = "Pear";
        pub const APPLE: &'static str = "Apple";
    }

    mod veggies {
        pub const CUCUMBER: &'static str = "Cucumber";
        pub const CARROT: &'static str = "Carrot";
    }
}

fn main() {
    println!(
        "favorite snacks: {} and {}",
        delicious_snacks::fruit,
        delicious_snacks::veggie
    );
}
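// Editor's note: the exercise above hinges on the two `pub use ... as ...`
// lines, which re-export constants from private child modules under new
// public names. A self-contained sketch of the same mechanism follows; the
// module and constant names here are illustrative only, not part of the
// exercise (an uppercase alias is used to keep const naming conventions).
mod library {
    // `inner` stays private; only the re-exported alias is reachable outside.
    mod inner {
        pub const VERSION: &str = "1.0.0";
    }

    // Re-export under a new name at the module's public surface.
    pub use self::inner::VERSION as LIB_VERSION;
}

fn main() {
    // Callers never name `library::inner`; the alias is the whole public API.
    println!("library version: {}", library::LIB_VERSION);
}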
22.5
66
0.584615
09970469cb083dc1e54910579014805147873670
79,856
// Copyright 2016 TiKV Project Authors. Licensed under Apache-2.0. use crossbeam::channel::{TryRecvError, TrySendError}; use engine::rocks; use engine::rocks::CompactionJobInfo; use engine::{WriteBatch, WriteOptions, DB}; use engine::{CF_DEFAULT, CF_LOCK, CF_RAFT, CF_WRITE}; use futures::Future; use kvproto::import_sstpb::SstMeta; use kvproto::metapb::{self, Region, RegionEpoch}; use kvproto::pdpb::StoreStats; use kvproto::raft_cmdpb::{AdminCmdType, AdminRequest}; use kvproto::raft_serverpb::{PeerState, RaftMessage, RegionLocalState}; use protobuf::Message; use raft::{Ready, StateRole}; use std::collections::BTreeMap; use std::collections::Bound::{Excluded, Included, Unbounded}; use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use std::sync::{Arc, Mutex}; use std::time::{Duration, Instant}; use std::{mem, thread, u64}; use time::{self, Timespec}; use tokio_threadpool::{Sender as ThreadPoolSender, ThreadPool}; use crate::import::SSTImporter; use crate::raftstore::coprocessor::split_observer::SplitObserver; use crate::raftstore::coprocessor::{CoprocessorHost, RegionChangeEvent}; use crate::raftstore::store::config::Config; use crate::raftstore::store::fsm::metrics::*; use crate::raftstore::store::fsm::peer::{ maybe_destroy_source, new_admin_request, PeerFsm, PeerFsmDelegate, }; #[cfg(feature = "failpoints")] use crate::raftstore::store::fsm::ApplyTaskRes; use crate::raftstore::store::fsm::{ batch, create_apply_batch_system, ApplyBatchSystem, ApplyPollerBuilder, ApplyRouter, ApplyTask, BasicMailbox, BatchRouter, BatchSystem, HandlerBuilder, }; use crate::raftstore::store::fsm::{ApplyNotifier, Fsm, PollHandler, RegionProposal}; use crate::raftstore::store::local_metrics::RaftMetrics; use crate::raftstore::store::metrics::*; use crate::raftstore::store::peer_storage::{self, HandleRaftReadyContext, InvokeContext}; use crate::raftstore::store::transport::Transport; use crate::raftstore::store::util::is_initial_msg; use crate::raftstore::store::worker::{ CleanupRunner, CleanupSSTRunner, CleanupSSTTask, CleanupTask, CompactRunner, CompactTask, ConsistencyCheckRunner, ConsistencyCheckTask, PdRunner, RaftlogGcRunner, RaftlogGcTask, ReadDelegate, RegionRunner, RegionTask, SplitCheckRunner, SplitCheckTask, }; use crate::raftstore::store::PdTask; use crate::raftstore::store::{ util, Callback, CasualMessage, PeerMsg, RaftCommand, SignificantMsg, SnapManager, SnapshotDeleter, StoreMsg, StoreTick, }; use crate::raftstore::Result; use crate::storage::kv::{CompactedEvent, CompactionListener}; use engine::Engines; use engine::{Iterable, Mutable, Peekable}; use keys::{self, data_end_key, data_key, enc_end_key, enc_start_key}; use pd_client::PdClient; use tikv_util::collections::{HashMap, HashSet}; use tikv_util::mpsc::{self, LooseBoundedSender, Receiver}; use tikv_util::time::{duration_to_sec, SlowTimer}; use tikv_util::timer::SteadyTimer; use tikv_util::worker::{FutureScheduler, FutureWorker, Scheduler, Worker}; use tikv_util::{is_zero_duration, sys as sys_util, Either, RingQueue}; type Key = Vec<u8>; const KV_WB_SHRINK_SIZE: usize = 256 * 1024; const RAFT_WB_SHRINK_SIZE: usize = 1024 * 1024; pub const PENDING_VOTES_CAP: usize = 20; const UNREACHABLE_BACKOFF: Duration = Duration::from_secs(10); pub struct StoreInfo { pub engine: Arc<DB>, pub capacity: u64, } pub struct StoreMeta { /// store id pub store_id: Option<u64>, /// region_end_key -> region_id pub region_ranges: BTreeMap<Vec<u8>, u64>, /// region_id -> region pub regions: HashMap<u64, Region>, /// region_id -> reader pub readers: 
HashMap<u64, ReadDelegate>, /// `MsgRequestPreVote` or `MsgRequestVote` messages from newly split Regions shouldn't be dropped if there is no /// such Region in this store now. So the messages are recorded temporarily and will be handled later. pub pending_votes: RingQueue<RaftMessage>, /// The regions with pending snapshots. pub pending_snapshot_regions: Vec<Region>, /// A marker used to indicate the peer of a Region has received a merge target message and waits to be destroyed. /// target_region_id -> (source_region_id -> merge_target_epoch) pub pending_merge_targets: HashMap<u64, HashMap<u64, RegionEpoch>>, /// An inverse mapping of `pending_merge_targets` used to let source peer help target peer to clean up related entry. /// source_region_id -> target_region_id pub targets_map: HashMap<u64, u64>, /// In raftstore, the execute order of `PrepareMerge` and `CommitMerge` is not certain because of the messages /// belongs two regions. To make them in order, `PrepareMerge` will set this structure and `CommitMerge` will retry /// later if there is no related lock. /// source_region_id -> (version, BiLock). pub merge_locks: HashMap<u64, (u64, Option<Arc<AtomicBool>>)>, } impl StoreMeta { pub fn new(vote_capacity: usize) -> StoreMeta { StoreMeta { store_id: None, region_ranges: BTreeMap::default(), regions: HashMap::default(), readers: HashMap::default(), pending_votes: RingQueue::with_capacity(vote_capacity), pending_snapshot_regions: Vec::default(), pending_merge_targets: HashMap::default(), targets_map: HashMap::default(), merge_locks: HashMap::default(), } } #[inline] pub fn set_region( &mut self, host: &CoprocessorHost, region: Region, peer: &mut crate::raftstore::store::Peer, ) { let prev = self.regions.insert(region.get_id(), region.clone()); if prev.map_or(true, |r| r.get_id() != region.get_id()) { // TODO: may not be a good idea to panic when holding a lock. 
panic!("{} region corrupted", peer.tag); } let reader = self.readers.get_mut(&region.get_id()).unwrap(); peer.set_region(host, reader, region); } } pub type RaftRouter = BatchRouter<PeerFsm, StoreFsm>; impl RaftRouter { pub fn send_raft_message( &self, mut msg: RaftMessage, ) -> std::result::Result<(), TrySendError<RaftMessage>> { let id = msg.get_region_id(); match self.try_send(id, PeerMsg::RaftMessage(msg)) { Either::Left(Ok(())) => return Ok(()), Either::Left(Err(TrySendError::Full(PeerMsg::RaftMessage(m)))) => { return Err(TrySendError::Full(m)); } Either::Left(Err(TrySendError::Disconnected(PeerMsg::RaftMessage(m)))) => { return Err(TrySendError::Disconnected(m)); } Either::Right(PeerMsg::RaftMessage(m)) => msg = m, _ => unreachable!(), } match self.send_control(StoreMsg::RaftMessage(msg)) { Ok(()) => Ok(()), Err(TrySendError::Full(StoreMsg::RaftMessage(m))) => Err(TrySendError::Full(m)), Err(TrySendError::Disconnected(StoreMsg::RaftMessage(m))) => { Err(TrySendError::Disconnected(m)) } _ => unreachable!(), } } #[inline] pub fn send_raft_command( &self, cmd: RaftCommand, ) -> std::result::Result<(), TrySendError<RaftCommand>> { let region_id = cmd.request.get_header().get_region_id(); match self.send(region_id, PeerMsg::RaftCommand(cmd)) { Ok(()) => Ok(()), Err(TrySendError::Full(PeerMsg::RaftCommand(cmd))) => Err(TrySendError::Full(cmd)), Err(TrySendError::Disconnected(PeerMsg::RaftCommand(cmd))) => { Err(TrySendError::Disconnected(cmd)) } _ => unreachable!(), } } fn report_unreachable(&self, store_id: u64) { self.broadcast_normal(|| { PeerMsg::SignificantMsg(SignificantMsg::StoreUnreachable { store_id }) }); } } pub struct PollContext<T, C: 'static> { pub cfg: Arc<Config>, pub store: metapb::Store, pub pd_scheduler: FutureScheduler<PdTask>, pub consistency_check_scheduler: Scheduler<ConsistencyCheckTask>, pub split_check_scheduler: Scheduler<SplitCheckTask>, // handle Compact, CleanupSST task pub cleanup_scheduler: Scheduler<CleanupTask>, pub raftlog_gc_scheduler: Scheduler<RaftlogGcTask>, pub region_scheduler: Scheduler<RegionTask>, pub apply_router: ApplyRouter, pub router: RaftRouter, pub importer: Arc<SSTImporter>, pub store_meta: Arc<Mutex<StoreMeta>>, pub future_poller: ThreadPoolSender, pub raft_metrics: RaftMetrics, pub snap_mgr: SnapManager, pub applying_snap_count: Arc<AtomicUsize>, pub coprocessor_host: Arc<CoprocessorHost>, pub timer: SteadyTimer, pub trans: T, pub pd_client: Arc<C>, pub global_stat: GlobalStoreStat, pub store_stat: LocalStoreStat, pub engines: Engines, pub kv_wb: WriteBatch, pub raft_wb: WriteBatch, pub pending_count: usize, pub sync_log: bool, pub has_ready: bool, pub ready_res: Vec<(Ready, InvokeContext)>, pub need_flush_trans: bool, pub queued_snapshot: HashSet<u64>, pub current_time: Option<Timespec>, } impl<T, C> HandleRaftReadyContext for PollContext<T, C> { #[inline] fn kv_wb(&self) -> &WriteBatch { &self.kv_wb } #[inline] fn kv_wb_mut(&mut self) -> &mut WriteBatch { &mut self.kv_wb } #[inline] fn raft_wb(&self) -> &WriteBatch { &self.raft_wb } #[inline] fn raft_wb_mut(&mut self) -> &mut WriteBatch { &mut self.raft_wb } #[inline] fn sync_log(&self) -> bool { self.sync_log } #[inline] fn set_sync_log(&mut self, sync: bool) { self.sync_log = sync; } } impl<T, C> PollContext<T, C> { #[inline] pub fn store_id(&self) -> u64 { self.store.get_id() } } impl<T: Transport, C> PollContext<T, C> { #[inline] fn schedule_store_tick(&self, tick: StoreTick, timeout: Duration) { if !is_zero_duration(&timeout) { let mb = self.router.control_mailbox(); let f = 
self .timer .delay(timeout) .map(move |_| { if let Err(e) = mb.force_send(StoreMsg::Tick(tick)) { info!( "failed to schedule store tick, are we shutting down?"; "tick" => ?tick, "err" => ?e ); } }) .map_err(move |e| { panic!("tick {:?} is lost due to timeout error: {:?}", tick, e); }); self.future_poller.spawn(f).unwrap(); } } pub fn handle_stale_msg( &mut self, msg: &RaftMessage, cur_epoch: RegionEpoch, need_gc: bool, target_region: Option<metapb::Region>, ) { let region_id = msg.get_region_id(); let from_peer = msg.get_from_peer(); let to_peer = msg.get_to_peer(); let msg_type = msg.get_message().get_msg_type(); if !need_gc { info!( "raft message is stale, ignore it"; "region_id" => region_id, "current_region_epoch" => ?cur_epoch, "msg_type" => ?msg_type, ); self.raft_metrics.message_dropped.stale_msg += 1; return; } info!( "raft message is stale, tell to gc"; "region_id" => region_id, "current_region_epoch" => ?cur_epoch, "msg_type" => ?msg_type, ); let mut gc_msg = RaftMessage::default(); gc_msg.set_region_id(region_id); gc_msg.set_from_peer(to_peer.clone()); gc_msg.set_to_peer(from_peer.clone()); gc_msg.set_region_epoch(cur_epoch.clone()); if let Some(r) = target_region { gc_msg.set_merge_target(r); } else { gc_msg.set_is_tombstone(true); } if let Err(e) = self.trans.send(gc_msg) { error!( "send gc message failed"; "region_id" => region_id, "err" => ?e ); } self.need_flush_trans = true; } } struct Store { // store id, before start the id is 0. id: u64, last_compact_checked_key: Key, stopped: bool, start_time: Option<Timespec>, consistency_check_time: HashMap<u64, Instant>, last_unreachable_report: HashMap<u64, Instant>, } pub struct StoreFsm { store: Store, receiver: Receiver<StoreMsg>, } impl StoreFsm { pub fn new(cfg: &Config) -> (LooseBoundedSender<StoreMsg>, Box<StoreFsm>) { let (tx, rx) = mpsc::loose_bounded(cfg.notify_capacity); let fsm = Box::new(StoreFsm { store: Store { id: 0, last_compact_checked_key: keys::DATA_MIN_KEY.to_vec(), stopped: false, start_time: None, consistency_check_time: HashMap::default(), last_unreachable_report: HashMap::default(), }, receiver: rx, }); (tx, fsm) } } impl Fsm for StoreFsm { type Message = StoreMsg; #[inline] fn is_stopped(&self) -> bool { self.store.stopped } } struct StoreFsmDelegate<'a, T: 'static, C: 'static> { fsm: &'a mut StoreFsm, ctx: &'a mut PollContext<T, C>, } impl<'a, T: Transport, C: PdClient> StoreFsmDelegate<'a, T, C> { fn on_tick(&mut self, tick: StoreTick) { let t = SlowTimer::new(); match tick { StoreTick::PdStoreHeartbeat => self.on_pd_store_heartbeat_tick(), StoreTick::SnapGc => self.on_snap_mgr_gc(), StoreTick::CompactLockCf => self.on_compact_lock_cf(), StoreTick::CompactCheck => self.on_compact_check_tick(), StoreTick::ConsistencyCheck => self.on_consistency_check_tick(), StoreTick::CleanupImportSST => self.on_cleanup_import_sst_tick(), } RAFT_EVENT_DURATION .with_label_values(&[tick.tag()]) .observe(duration_to_sec(t.elapsed()) as f64); slow_log!(t, "[store {}] handle timeout {:?}", self.fsm.store.id, tick); } fn handle_msgs(&mut self, msgs: &mut Vec<StoreMsg>) { for m in msgs.drain(..) 
{ match m { StoreMsg::Tick(tick) => self.on_tick(tick), StoreMsg::RaftMessage(msg) => { if let Err(e) = self.on_raft_message(msg) { error!( "handle raft message failed"; "store_id" => self.fsm.store.id, "err" => ?e ); } } StoreMsg::CompactedEvent(event) => self.on_compaction_finished(event), StoreMsg::ValidateSSTResult { invalid_ssts } => { self.on_validate_sst_result(invalid_ssts) } StoreMsg::ClearRegionSizeInRange { start_key, end_key } => { self.clear_region_size_in_range(&start_key, &end_key) } StoreMsg::SnapshotStats => self.store_heartbeat_pd(), StoreMsg::StoreUnreachable { store_id } => { self.on_store_unreachable(store_id); } StoreMsg::Start { store } => self.start(store), } } } fn start(&mut self, store: metapb::Store) { if self.fsm.store.start_time.is_some() { panic!( "[store {}] unable to start again with meta {:?}", self.fsm.store.id, store ); } self.fsm.store.id = store.get_id(); self.fsm.store.start_time = Some(time::get_time()); self.register_cleanup_import_sst_tick(); self.register_compact_check_tick(); self.register_pd_store_heartbeat_tick(); self.register_compact_lock_cf_tick(); self.register_snap_mgr_gc_tick(); self.register_consistency_check_tick(); } } pub struct RaftPoller<T: 'static, C: 'static> { tag: String, store_msg_buf: Vec<StoreMsg>, peer_msg_buf: Vec<PeerMsg>, previous_metrics: RaftMetrics, timer: SlowTimer, poll_ctx: PollContext<T, C>, pending_proposals: Vec<RegionProposal>, messages_per_tick: usize, } impl<T: Transport, C: PdClient> RaftPoller<T, C> { fn handle_raft_ready(&mut self, peers: &mut [Box<PeerFsm>]) { // Only enable the fail point when the store id is equal to 3, which is // the id of slow store in tests. fail_point!("on_raft_ready", self.poll_ctx.store_id() == 3, |_| {}); if !self.pending_proposals.is_empty() { for prop in self.pending_proposals.drain(..) { self.poll_ctx .apply_router .schedule_task(prop.region_id, ApplyTask::Proposal(prop)); } } if self.poll_ctx.need_flush_trans && (!self.poll_ctx.kv_wb.is_empty() || !self.poll_ctx.raft_wb.is_empty()) { self.poll_ctx.trans.flush(); self.poll_ctx.need_flush_trans = false; } let ready_cnt = self.poll_ctx.ready_res.len(); self.poll_ctx.raft_metrics.ready.has_ready_region += ready_cnt as u64; fail_point!("raft_before_save"); if !self.poll_ctx.kv_wb.is_empty() { let mut write_opts = WriteOptions::new(); write_opts.set_sync(true); self.poll_ctx .engines .kv .write_opt(&self.poll_ctx.kv_wb, &write_opts) .unwrap_or_else(|e| { panic!("{} failed to save append state result: {:?}", self.tag, e); }); let data_size = self.poll_ctx.kv_wb.data_size(); if data_size > KV_WB_SHRINK_SIZE { self.poll_ctx.kv_wb = WriteBatch::with_capacity(4 * 1024); } else { self.poll_ctx.kv_wb.clear(); } } fail_point!("raft_between_save"); if !self.poll_ctx.raft_wb.is_empty() { let mut write_opts = WriteOptions::new(); write_opts.set_sync(self.poll_ctx.cfg.sync_log || self.poll_ctx.sync_log); self.poll_ctx .engines .raft .write_opt(&self.poll_ctx.raft_wb, &write_opts) .unwrap_or_else(|e| { panic!("{} failed to save raft append result: {:?}", self.tag, e); }); let data_size = self.poll_ctx.raft_wb.data_size(); if data_size > RAFT_WB_SHRINK_SIZE { self.poll_ctx.raft_wb = WriteBatch::with_capacity(4 * 1024); } else { self.poll_ctx.raft_wb.clear(); } } fail_point!("raft_after_save"); if ready_cnt != 0 { let mut batch_pos = 0; let mut ready_res = mem::replace(&mut self.poll_ctx.ready_res, Vec::default()); for (ready, invoke_ctx) in ready_res.drain(..) 
{ let region_id = invoke_ctx.region_id; if peers[batch_pos].region_id() == region_id { } else { while peers[batch_pos].region_id() != region_id { batch_pos += 1; } } PeerFsmDelegate::new(&mut peers[batch_pos], &mut self.poll_ctx) .post_raft_ready_append(ready, invoke_ctx); } } let dur = self.timer.elapsed(); if !self.poll_ctx.store_stat.is_busy { let election_timeout = Duration::from_millis( self.poll_ctx.cfg.raft_base_tick_interval.as_millis() * self.poll_ctx.cfg.raft_election_timeout_ticks as u64, ); if dur >= election_timeout { self.poll_ctx.store_stat.is_busy = true; } } self.poll_ctx .raft_metrics .append_log .observe(duration_to_sec(dur) as f64); slow_log!( self.timer, "{} handle {} pending peers include {} ready, {} entries, {} messages and {} \ snapshots", self.tag, self.poll_ctx.pending_count, ready_cnt, self.poll_ctx.raft_metrics.ready.append - self.previous_metrics.ready.append, self.poll_ctx.raft_metrics.ready.message - self.previous_metrics.ready.message, self.poll_ctx.raft_metrics.ready.snapshot - self.previous_metrics.ready.snapshot ); } } impl<T: Transport, C: PdClient> PollHandler<PeerFsm, StoreFsm> for RaftPoller<T, C> { fn begin(&mut self, batch_size: usize) { self.previous_metrics = self.poll_ctx.raft_metrics.clone(); self.poll_ctx.pending_count = 0; self.poll_ctx.sync_log = false; self.poll_ctx.has_ready = false; if self.pending_proposals.capacity() == 0 { self.pending_proposals = Vec::with_capacity(batch_size); } self.timer = SlowTimer::new(); } fn handle_control(&mut self, store: &mut StoreFsm) -> Option<usize> { let mut expected_msg_count = None; while self.store_msg_buf.len() < self.messages_per_tick { match store.receiver.try_recv() { Ok(msg) => self.store_msg_buf.push(msg), Err(TryRecvError::Empty) => { expected_msg_count = Some(0); break; } Err(TryRecvError::Disconnected) => { store.store.stopped = true; expected_msg_count = Some(0); break; } } } let mut delegate = StoreFsmDelegate { fsm: store, ctx: &mut self.poll_ctx, }; delegate.handle_msgs(&mut self.store_msg_buf); expected_msg_count } fn handle_normal(&mut self, peer: &mut PeerFsm) -> Option<usize> { let mut expected_msg_count = None; if peer.have_pending_merge_apply_result() { expected_msg_count = Some(peer.receiver.len()); let mut delegate = PeerFsmDelegate::new(peer, &mut self.poll_ctx); if !delegate.resume_handling_pending_apply_result() { return expected_msg_count; } expected_msg_count = None; } fail_point!( "pause_on_peer_collect_message", peer.peer_id() == 1, |_| unreachable!() ); while self.peer_msg_buf.len() < self.messages_per_tick { match peer.receiver.try_recv() { // TODO: we may need a way to optimize the message copy. Ok(msg) => { fail_point!( "pause_on_peer_destroy_res", peer.peer_id() == 1 && match msg { PeerMsg::ApplyRes { res: ApplyTaskRes::Destroy { .. 
}, } => true, _ => false, }, |_| unreachable!() ); self.peer_msg_buf.push(msg) } Err(TryRecvError::Empty) => { expected_msg_count = Some(0); break; } Err(TryRecvError::Disconnected) => { peer.stop(); expected_msg_count = Some(0); break; } } } let mut delegate = PeerFsmDelegate::new(peer, &mut self.poll_ctx); delegate.handle_msgs(&mut self.peer_msg_buf); delegate.collect_ready(&mut self.pending_proposals); expected_msg_count } fn end(&mut self, peers: &mut [Box<PeerFsm>]) { if self.poll_ctx.has_ready { self.handle_raft_ready(peers); } self.poll_ctx.current_time = None; if !self.poll_ctx.queued_snapshot.is_empty() { let mut meta = self.poll_ctx.store_meta.lock().unwrap(); meta.pending_snapshot_regions .retain(|r| !self.poll_ctx.queued_snapshot.contains(&r.get_id())); self.poll_ctx.queued_snapshot.clear(); } self.poll_ctx .raft_metrics .process_ready .observe(duration_to_sec(self.timer.elapsed()) as f64); self.poll_ctx.raft_metrics.flush(); self.poll_ctx.store_stat.flush(); } fn pause(&mut self) { if self.poll_ctx.need_flush_trans { self.poll_ctx.trans.flush(); self.poll_ctx.need_flush_trans = false; } } } pub struct RaftPollerBuilder<T, C> { pub cfg: Arc<Config>, pub store: metapb::Store, pd_scheduler: FutureScheduler<PdTask>, consistency_check_scheduler: Scheduler<ConsistencyCheckTask>, split_check_scheduler: Scheduler<SplitCheckTask>, cleanup_scheduler: Scheduler<CleanupTask>, raftlog_gc_scheduler: Scheduler<RaftlogGcTask>, pub region_scheduler: Scheduler<RegionTask>, apply_router: ApplyRouter, pub router: RaftRouter, pub importer: Arc<SSTImporter>, store_meta: Arc<Mutex<StoreMeta>>, future_poller: ThreadPoolSender, snap_mgr: SnapManager, pub coprocessor_host: Arc<CoprocessorHost>, trans: T, pd_client: Arc<C>, global_stat: GlobalStoreStat, pub engines: Engines, applying_snap_count: Arc<AtomicUsize>, } impl<T, C> RaftPollerBuilder<T, C> { /// Initialize this store. It scans the db engine, loads all regions /// and their peers from it, and schedules snapshot worker if necessary. /// WARN: This store should not be used before initialized. fn init(&mut self) -> Result<Vec<(LooseBoundedSender<PeerMsg>, Box<PeerFsm>)>> { // Scan region meta to get saved regions. let start_key = keys::REGION_META_MIN_KEY; let end_key = keys::REGION_META_MAX_KEY; let kv_engine = Arc::clone(&self.engines.kv); let store_id = self.store.get_id(); let mut total_count = 0; let mut tombstone_count = 0; let mut applying_count = 0; let mut region_peers = vec![]; let t = Instant::now(); let mut kv_wb = WriteBatch::default(); let mut raft_wb = WriteBatch::default(); let mut applying_regions = vec![]; let mut merging_count = 0; let mut meta = self.store_meta.lock().unwrap(); kv_engine.scan_cf(CF_RAFT, start_key, end_key, false, |key, value| { let (region_id, suffix) = box_try!(keys::decode_region_meta_key(key)); if suffix != keys::REGION_STATE_SUFFIX { return Ok(true); } total_count += 1; let mut local_state = RegionLocalState::default(); local_state.merge_from_bytes(value)?; let region = local_state.get_region(); if local_state.get_state() == PeerState::Tombstone { tombstone_count += 1; debug!("region is tombstone"; "region" => ?region, "store_id" => store_id); self.clear_stale_meta(&mut kv_wb, &mut raft_wb, &local_state); return Ok(true); } if local_state.get_state() == PeerState::Applying { // in case of restart happen when we just write region state to Applying, // but not write raft_local_state to raft rocksdb in time. 
box_try!(peer_storage::recover_from_applying_state( &self.engines, &raft_wb, region_id )); applying_count += 1; applying_regions.push(region.clone()); return Ok(true); } let (tx, mut peer) = box_try!(PeerFsm::create( store_id, &self.cfg, self.region_scheduler.clone(), self.engines.clone(), region, )); if local_state.get_state() == PeerState::Merging { info!("region is merging"; "region" => ?region, "store_id" => store_id); merging_count += 1; peer.set_pending_merge_state(local_state.get_merge_state().to_owned()); } meta.region_ranges.insert(enc_end_key(region), region_id); meta.regions.insert(region_id, region.clone()); // No need to check duplicated here, because we use region id as the key // in DB. region_peers.push((tx, peer)); self.coprocessor_host.on_region_changed( region, RegionChangeEvent::Create, StateRole::Follower, ); Ok(true) })?; if !kv_wb.is_empty() { self.engines.kv.write(&kv_wb).unwrap(); self.engines.kv.sync_wal().unwrap(); } if !raft_wb.is_empty() { self.engines.raft.write(&raft_wb).unwrap(); self.engines.raft.sync_wal().unwrap(); } // schedule applying snapshot after raft writebatch were written. for region in applying_regions { info!("region is applying snapshot"; "region" => ?region, "store_id" => store_id); let (tx, mut peer) = PeerFsm::create( store_id, &self.cfg, self.region_scheduler.clone(), self.engines.clone(), &region, )?; peer.schedule_applying_snapshot(); meta.region_ranges .insert(enc_end_key(&region), region.get_id()); meta.regions.insert(region.get_id(), region); region_peers.push((tx, peer)); } info!( "start store"; "store_id" => store_id, "region_count" => total_count, "tombstone_count" => tombstone_count, "applying_count" => applying_count, "merge_count" => merging_count, "takes" => ?t.elapsed(), ); self.clear_stale_data(&meta)?; Ok(region_peers) } fn clear_stale_meta( &self, kv_wb: &mut WriteBatch, raft_wb: &mut WriteBatch, origin_state: &RegionLocalState, ) { let region = origin_state.get_region(); let raft_key = keys::raft_state_key(region.get_id()); let raft_state = match self.engines.raft.get_msg(&raft_key).unwrap() { // it has been cleaned up. None => return, Some(value) => value, }; peer_storage::clear_meta(&self.engines, kv_wb, raft_wb, region.get_id(), &raft_state) .unwrap(); let key = keys::region_state_key(region.get_id()); let handle = rocks::util::get_cf_handle(&self.engines.kv, CF_RAFT).unwrap(); kv_wb.put_msg_cf(handle, &key, origin_state).unwrap(); } /// `clear_stale_data` clean up all possible garbage data. 
fn clear_stale_data(&self, meta: &StoreMeta) -> Result<()> { let t = Instant::now(); let mut ranges = Vec::new(); let mut last_start_key = keys::data_key(b""); for region_id in meta.region_ranges.values() { let region = &meta.regions[region_id]; let start_key = keys::enc_start_key(region); ranges.push((last_start_key, start_key)); last_start_key = keys::enc_end_key(region); } ranges.push((last_start_key, keys::DATA_MAX_KEY.to_vec())); rocks::util::roughly_cleanup_ranges(&self.engines.kv, &ranges)?; info!( "cleans up garbage data"; "store_id" => self.store.get_id(), "garbage_range_count" => ranges.len(), "takes" => ?t.elapsed() ); Ok(()) } } impl<T, C> HandlerBuilder<PeerFsm, StoreFsm> for RaftPollerBuilder<T, C> where T: Transport + 'static, C: PdClient + 'static, { type Handler = RaftPoller<T, C>; fn build(&mut self) -> RaftPoller<T, C> { let ctx = PollContext { cfg: self.cfg.clone(), store: self.store.clone(), pd_scheduler: self.pd_scheduler.clone(), consistency_check_scheduler: self.consistency_check_scheduler.clone(), split_check_scheduler: self.split_check_scheduler.clone(), region_scheduler: self.region_scheduler.clone(), apply_router: self.apply_router.clone(), router: self.router.clone(), cleanup_scheduler: self.cleanup_scheduler.clone(), raftlog_gc_scheduler: self.raftlog_gc_scheduler.clone(), importer: self.importer.clone(), store_meta: self.store_meta.clone(), future_poller: self.future_poller.clone(), raft_metrics: RaftMetrics::default(), snap_mgr: self.snap_mgr.clone(), applying_snap_count: self.applying_snap_count.clone(), coprocessor_host: self.coprocessor_host.clone(), timer: SteadyTimer::default(), trans: self.trans.clone(), pd_client: self.pd_client.clone(), global_stat: self.global_stat.clone(), store_stat: self.global_stat.local(), engines: self.engines.clone(), kv_wb: WriteBatch::default(), raft_wb: WriteBatch::with_capacity(4 * 1024), pending_count: 0, sync_log: false, has_ready: false, ready_res: Vec::new(), need_flush_trans: false, queued_snapshot: HashSet::default(), current_time: None, }; RaftPoller { tag: format!("[store {}]", ctx.store.get_id()), store_msg_buf: Vec::with_capacity(ctx.cfg.messages_per_tick), peer_msg_buf: Vec::with_capacity(ctx.cfg.messages_per_tick), previous_metrics: ctx.raft_metrics.clone(), timer: SlowTimer::new(), messages_per_tick: ctx.cfg.messages_per_tick, poll_ctx: ctx, pending_proposals: Vec::new(), } } } struct Workers { pd_worker: FutureWorker<PdTask>, consistency_check_worker: Worker<ConsistencyCheckTask>, split_check_worker: Worker<SplitCheckTask>, // handle Compact, CleanupSST task cleanup_worker: Worker<CleanupTask>, raftlog_gc_worker: Worker<RaftlogGcTask>, region_worker: Worker<RegionTask>, coprocessor_host: Arc<CoprocessorHost>, future_poller: ThreadPool, } pub struct RaftBatchSystem { system: BatchSystem<PeerFsm, StoreFsm>, apply_router: ApplyRouter, apply_system: ApplyBatchSystem, router: RaftRouter, workers: Option<Workers>, } impl RaftBatchSystem { pub fn router(&self) -> RaftRouter { self.router.clone() } pub fn spawn<T: Transport + 'static, C: PdClient + 'static>( &mut self, meta: metapb::Store, mut cfg: Config, engines: Engines, trans: T, pd_client: Arc<C>, mgr: SnapManager, pd_worker: FutureWorker<PdTask>, store_meta: Arc<Mutex<StoreMeta>>, mut coprocessor_host: CoprocessorHost, importer: Arc<SSTImporter>, ) -> Result<()> { assert!(self.workers.is_none()); // TODO: we can get cluster meta regularly too later. 
cfg.validate()?; // TODO load coprocessors from configuration coprocessor_host .registry .register_admin_observer(100, Box::new(SplitObserver)); let workers = Workers { split_check_worker: Worker::new("split-check"), region_worker: Worker::new("snapshot-worker"), pd_worker, consistency_check_worker: Worker::new("consistency-check"), cleanup_worker: Worker::new("cleanup-worker"), raftlog_gc_worker: Worker::new("raft-gc-worker"), coprocessor_host: Arc::new(coprocessor_host), future_poller: tokio_threadpool::Builder::new() .name_prefix("future-poller") .pool_size(cfg.future_poll_size) .build(), }; let mut builder = RaftPollerBuilder { cfg: Arc::new(cfg), store: meta, engines, router: self.router.clone(), split_check_scheduler: workers.split_check_worker.scheduler(), region_scheduler: workers.region_worker.scheduler(), pd_scheduler: workers.pd_worker.scheduler(), consistency_check_scheduler: workers.consistency_check_worker.scheduler(), cleanup_scheduler: workers.cleanup_worker.scheduler(), raftlog_gc_scheduler: workers.raftlog_gc_worker.scheduler(), apply_router: self.apply_router.clone(), trans, pd_client, coprocessor_host: workers.coprocessor_host.clone(), importer, snap_mgr: mgr, global_stat: GlobalStoreStat::default(), store_meta, applying_snap_count: Arc::new(AtomicUsize::new(0)), future_poller: workers.future_poller.sender().clone(), }; let region_peers = builder.init()?; self.start_system(workers, region_peers, builder)?; Ok(()) } fn start_system<T: Transport + 'static, C: PdClient + 'static>( &mut self, mut workers: Workers, region_peers: Vec<(LooseBoundedSender<PeerMsg>, Box<PeerFsm>)>, builder: RaftPollerBuilder<T, C>, ) -> Result<()> { builder.snap_mgr.init()?; let engines = builder.engines.clone(); let snap_mgr = builder.snap_mgr.clone(); let cfg = builder.cfg.clone(); let store = builder.store.clone(); let pd_client = builder.pd_client.clone(); let importer = builder.importer.clone(); let apply_poller_builder = ApplyPollerBuilder::new( &builder, ApplyNotifier::Router(self.router.clone()), self.apply_router.clone(), ); self.apply_system .schedule_all(region_peers.iter().map(|pair| pair.1.get_peer())); { let mut meta = builder.store_meta.lock().unwrap(); for (_, peer_fsm) in &region_peers { let peer = peer_fsm.get_peer(); meta.readers .insert(peer_fsm.region_id(), ReadDelegate::from_peer(peer)); } } let router = Mutex::new(self.router.clone()); pd_client.handle_reconnect(move || { router .lock() .unwrap() .broadcast_normal(|| PeerMsg::HeartbeatPd); }); let tag = format!("raftstore-{}", store.get_id()); self.system.spawn(tag, builder); let mut mailboxes = Vec::with_capacity(region_peers.len()); let mut address = Vec::with_capacity(region_peers.len()); for (tx, fsm) in region_peers { address.push(fsm.region_id()); mailboxes.push((fsm.region_id(), BasicMailbox::new(tx, fsm))); } self.router.register_all(mailboxes); // Make sure Msg::Start is the first message each FSM received. 
for addr in address { self.router.force_send(addr, PeerMsg::Start).unwrap(); } self.router .send_control(StoreMsg::Start { store: store.clone(), }) .unwrap(); self.apply_system .spawn("apply".to_owned(), apply_poller_builder); let split_check_runner = SplitCheckRunner::new( Arc::clone(&engines.kv), self.router.clone(), Arc::clone(&workers.coprocessor_host), ); box_try!(workers.split_check_worker.start(split_check_runner)); let region_runner = RegionRunner::new( engines.clone(), snap_mgr, cfg.snap_apply_batch_size.0 as usize, cfg.use_delete_range, cfg.clean_stale_peer_delay.0, ); let timer = RegionRunner::new_timer(); box_try!(workers.region_worker.start_with_timer(region_runner, timer)); let raftlog_gc_runner = RaftlogGcRunner::new(None); box_try!(workers.raftlog_gc_worker.start(raftlog_gc_runner)); let compact_runner = CompactRunner::new(Arc::clone(&engines.kv)); let cleanup_sst_runner = CleanupSSTRunner::new( store.get_id(), self.router.clone(), Arc::clone(&importer), Arc::clone(&pd_client), ); let cleanup_runner = CleanupRunner::new(compact_runner, cleanup_sst_runner); box_try!(workers.cleanup_worker.start(cleanup_runner)); let pd_runner = PdRunner::new( store.get_id(), Arc::clone(&pd_client), self.router.clone(), Arc::clone(&engines.kv), workers.pd_worker.scheduler(), cfg.pd_store_heartbeat_tick_interval.as_secs(), ); box_try!(workers.pd_worker.start(pd_runner)); let consistency_check_runner = ConsistencyCheckRunner::new(self.router.clone()); box_try!(workers .consistency_check_worker .start(consistency_check_runner)); if let Err(e) = sys_util::thread::set_priority(sys_util::HIGH_PRI) { warn!("set thread priority for raftstore failed"; "error" => ?e); } self.workers = Some(workers); Ok(()) } pub fn shutdown(&mut self) { if self.workers.is_none() { return; } let mut workers = self.workers.take().unwrap(); // Wait all workers finish. let mut handles: Vec<Option<thread::JoinHandle<()>>> = vec![]; handles.push(workers.split_check_worker.stop()); handles.push(workers.region_worker.stop()); handles.push(workers.pd_worker.stop()); handles.push(workers.consistency_check_worker.stop()); handles.push(workers.cleanup_worker.stop()); handles.push(workers.raftlog_gc_worker.stop()); self.apply_system.shutdown(); self.system.shutdown(); for h in handles { if let Some(h) = h { h.join().unwrap(); } } workers.coprocessor_host.shutdown(); workers.future_poller.shutdown_now().wait().unwrap(); } } pub fn create_raft_batch_system(cfg: &Config) -> (RaftRouter, RaftBatchSystem) { let (store_tx, store_fsm) = StoreFsm::new(cfg); let (apply_router, apply_system) = create_apply_batch_system(&cfg); let (router, system) = batch::create_system( cfg.store_pool_size, cfg.store_max_batch_size, store_tx, store_fsm, ); let system = RaftBatchSystem { system, workers: None, apply_router, apply_system, router: router.clone(), }; (router, system) } impl<'a, T: Transport, C: PdClient> StoreFsmDelegate<'a, T, C> { /// Checks if the message is targeting a stale peer. /// /// Returns true means the message can be dropped silently. fn check_msg(&mut self, msg: &RaftMessage) -> Result<bool> { let region_id = msg.get_region_id(); let from_epoch = msg.get_region_epoch(); let msg_type = msg.get_message().get_msg_type(); let is_vote_msg = util::is_vote_msg(msg.get_message()); let from_store_id = msg.get_from_peer().get_store_id(); // Check if the target peer is tombstone. let state_key = keys::region_state_key(region_id); let local_state: RegionLocalState = match self.ctx.engines.kv.get_msg_cf(CF_RAFT, &state_key)? 
{ Some(state) => state, None => return Ok(false), }; if local_state.get_state() != PeerState::Tombstone { // Maybe split, but not registered yet. self.ctx.raft_metrics.message_dropped.region_nonexistent += 1; if util::is_first_vote_msg(msg.get_message()) { let mut meta = self.ctx.store_meta.lock().unwrap(); // Last check on whether target peer is created, otherwise, the // vote message will never be consumed. if meta.regions.contains_key(&region_id) { return Ok(false); } meta.pending_votes.push(msg.to_owned()); info!( "region doesn't exist yet, wait for it to be split"; "region_id" => region_id ); return Ok(true); } return Err(box_err!( "[region {}] region not exist but not tombstone: {:?}", region_id, local_state )); } debug!( "region is in tombstone state"; "region_id" => region_id, "region_local_state" => ?local_state, ); let region = local_state.get_region(); let region_epoch = region.get_region_epoch(); if local_state.has_merge_state() { info!( "merged peer receives a stale message"; "region_id" => region_id, "current_region_epoch" => ?region_epoch, "msg_type" => ?msg_type, ); let merge_target = if let Some(peer) = util::find_peer(region, from_store_id) { // Maybe the target is promoted from learner to voter, but the follower // doesn't know it. So we only compare peer id. assert_eq!(peer.get_id(), msg.get_from_peer().get_id()); // Let stale peer decides whether it should wait for merging or just remove // itself. Some(local_state.get_merge_state().get_target().to_owned()) } else { // If a peer is isolated before prepare_merge and conf remove, it should just // remove itself. None }; self.ctx .handle_stale_msg(msg, region_epoch.clone(), true, merge_target); return Ok(true); } // The region in this peer is already destroyed if util::is_epoch_stale(from_epoch, region_epoch) { info!( "tombstone peer receives a stale message"; "region_id" => region_id, "from_region_epoch" => ?from_epoch, "current_region_epoch" => ?region_epoch, "msg_type" => ?msg_type, ); let not_exist = util::find_peer(region, from_store_id).is_none(); self.ctx .handle_stale_msg(msg, region_epoch.clone(), is_vote_msg && not_exist, None); return Ok(true); } if from_epoch.get_conf_ver() == region_epoch.get_conf_ver() { self.ctx.raft_metrics.message_dropped.region_tombstone_peer += 1; return Err(box_err!( "tombstone peer [epoch: {:?}] receive an invalid \ message {:?}, ignore it", region_epoch, msg_type )); } Ok(false) } fn on_raft_message(&mut self, mut msg: RaftMessage) -> Result<()> { let region_id = msg.get_region_id(); match self.ctx.router.send(region_id, PeerMsg::RaftMessage(msg)) { Ok(()) | Err(TrySendError::Full(_)) => return Ok(()), Err(TrySendError::Disconnected(PeerMsg::RaftMessage(m))) => msg = m, e => panic!( "[store {}] [region {}] unexpected redirect error: {:?}", self.fsm.store.id, region_id, e ), } debug!( "handle raft message"; "from_peer_id" => msg.get_from_peer().get_id(), "to_peer_id" => msg.get_to_peer().get_id(), "store_id" => self.fsm.store.id, "region_id" => region_id, "msg_type" => ?msg.get_message().get_msg_type(), ); if msg.get_to_peer().get_store_id() != self.ctx.store_id() { warn!( "store not match, ignore it"; "store_id" => self.ctx.store_id(), "to_store_id" => msg.get_to_peer().get_store_id(), "region_id" => region_id, ); self.ctx.raft_metrics.message_dropped.mismatch_store_id += 1; return Ok(()); } if !msg.has_region_epoch() { error!( "missing epoch in raft message, ignore it"; "region_id" => region_id, ); self.ctx.raft_metrics.message_dropped.mismatch_region_epoch += 1; return Ok(()); } if 
msg.get_is_tombstone() || msg.has_merge_target() { // Target tombstone peer doesn't exist, so ignore it. return Ok(()); } if self.check_msg(&msg)? { return Ok(()); } if !self.maybe_create_peer(region_id, &msg)? { return Ok(()); } let _ = self.ctx.router.send(region_id, PeerMsg::RaftMessage(msg)); Ok(()) } /// If target peer doesn't exist, create it. /// /// return false to indicate that target peer is in invalid state or /// doesn't exist and can't be created. fn maybe_create_peer(&mut self, region_id: u64, msg: &RaftMessage) -> Result<bool> { let target = msg.get_to_peer(); // we may encounter a message with larger peer id, which means // current peer is stale, then we should remove current peer let mut guard = self.ctx.store_meta.lock().unwrap(); let meta: &mut StoreMeta = &mut *guard; if meta.regions.contains_key(&region_id) { return Ok(true); } if !is_initial_msg(msg.get_message()) { let msg_type = msg.get_message().get_msg_type(); debug!( "target peer doesn't exist, stale message"; "target_peer" => ?target, "region_id" => region_id, "msg_type" => ?msg_type, ); self.ctx.raft_metrics.message_dropped.stale_msg += 1; return Ok(false); } let mut regions_to_destroy = vec![]; for (_, id) in meta.region_ranges.range(( Excluded(data_key(msg.get_start_key())), Unbounded::<Vec<u8>>, )) { let exist_region = &meta.regions[&id]; if enc_start_key(exist_region) >= data_end_key(msg.get_end_key()) { break; } debug!( "msg is overlapped with exist region"; "region_id" => region_id, "msg" => ?msg, "exist_region" => ?exist_region, ); if util::is_first_vote_msg(msg.get_message()) { meta.pending_votes.push(msg.to_owned()); } // Make sure the range of region from msg is covered by existing regions. // If so, means that the region may be generated by some kinds of split // and merge by catching logs. So there is no need to accept a snapshot. if !is_range_covered( &meta.region_ranges, |id: u64| &meta.regions[&id], data_key(msg.get_start_key()), data_end_key(msg.get_end_key()), ) { if maybe_destroy_source( meta, region_id, exist_region.get_id(), msg.get_region_epoch().to_owned(), ) { regions_to_destroy.push(exist_region.get_id()); continue; } } self.ctx.raft_metrics.message_dropped.region_overlap += 1; return Ok(false); } for id in regions_to_destroy { self.ctx .router .force_send( id, PeerMsg::CasualMessage(CasualMessage::MergeResult { target: target.clone(), stale: true, }), ) .unwrap(); } // New created peers should know it's learner or not. let (tx, peer) = PeerFsm::replicate( self.ctx.store_id(), &self.ctx.cfg, self.ctx.region_scheduler.clone(), self.ctx.engines.clone(), region_id, target.clone(), )?; // following snapshot may overlap, should insert into region_ranges after // snapshot is applied. meta.regions .insert(region_id, peer.get_peer().region().to_owned()); let mailbox = BasicMailbox::new(tx, peer); self.ctx.router.register(region_id, mailbox); self.ctx .router .force_send(region_id, PeerMsg::Start) .unwrap(); Ok(true) } fn on_compaction_finished(&mut self, event: CompactedEvent) { // If size declining is trivial, skip. 
let total_bytes_declined = if event.total_input_bytes > event.total_output_bytes { event.total_input_bytes - event.total_output_bytes } else { 0 }; if total_bytes_declined < self.ctx.cfg.region_split_check_diff.0 || total_bytes_declined * 10 < event.total_input_bytes { return; } let output_level_str = event.output_level.to_string(); COMPACTION_DECLINED_BYTES .with_label_values(&[&output_level_str]) .observe(total_bytes_declined as f64); // self.cfg.region_split_check_diff.0 / 16 is an experienced value. let mut region_declined_bytes = { let meta = self.ctx.store_meta.lock().unwrap(); calc_region_declined_bytes( event, &meta.region_ranges, self.ctx.cfg.region_split_check_diff.0 / 16, ) }; COMPACTION_RELATED_REGION_COUNT .with_label_values(&[&output_level_str]) .observe(region_declined_bytes.len() as f64); for (region_id, declined_bytes) in region_declined_bytes.drain(..) { let _ = self.ctx.router.send( region_id, PeerMsg::CasualMessage(CasualMessage::CompactionDeclinedBytes { bytes: declined_bytes, }), ); } } fn register_compact_check_tick(&self) { self.ctx.schedule_store_tick( StoreTick::CompactCheck, self.ctx.cfg.region_compact_check_interval.0, ) } fn on_compact_check_tick(&mut self) { self.register_compact_check_tick(); if self.ctx.cleanup_scheduler.is_busy() { debug!( "compact worker is busy, check space redundancy next time"; "store_id" => self.fsm.store.id, ); return; } if rocks::util::auto_compactions_is_disabled(&self.ctx.engines.kv) { debug!( "skip compact check when disabled auto compactions"; "store_id" => self.fsm.store.id, ); return; } // Start from last checked key. let mut ranges_need_check = Vec::with_capacity(self.ctx.cfg.region_compact_check_step as usize + 1); ranges_need_check.push(self.fsm.store.last_compact_checked_key.clone()); let largest_key = { let meta = self.ctx.store_meta.lock().unwrap(); if meta.region_ranges.is_empty() { debug!( "there is no range need to check"; "store_id" => self.fsm.store.id ); return; } // Collect continuous ranges. let left_ranges = meta.region_ranges.range(( Excluded(self.fsm.store.last_compact_checked_key.clone()), Unbounded::<Key>, )); ranges_need_check.extend( left_ranges .take(self.ctx.cfg.region_compact_check_step as usize) .map(|(k, _)| k.to_owned()), ); // Update last_compact_checked_key. meta.region_ranges.keys().last().unwrap().to_vec() }; let last_key = ranges_need_check.last().unwrap().clone(); if last_key == largest_key { // Range [largest key, DATA_MAX_KEY) also need to check. if last_key != keys::DATA_MAX_KEY.to_vec() { ranges_need_check.push(keys::DATA_MAX_KEY.to_vec()); } // Next task will start from the very beginning. self.fsm.store.last_compact_checked_key = keys::DATA_MIN_KEY.to_vec(); } else { self.fsm.store.last_compact_checked_key = last_key; } // Schedule the task. 
let cf_names = vec![CF_DEFAULT.to_owned(), CF_WRITE.to_owned()]; if let Err(e) = self.ctx.cleanup_scheduler.schedule(CleanupTask::Compact( CompactTask::CheckAndCompact { cf_names, ranges: ranges_need_check, tombstones_num_threshold: self.ctx.cfg.region_compact_min_tombstones, tombstones_percent_threshold: self.ctx.cfg.region_compact_tombstones_percent, }, )) { error!( "schedule space check task failed"; "store_id" => self.fsm.store.id, "err" => ?e, ); } } fn store_heartbeat_pd(&mut self) { let mut stats = StoreStats::default(); let used_size = self.ctx.snap_mgr.get_total_snap_size(); stats.set_used_size(used_size); stats.set_store_id(self.ctx.store_id()); { let meta = self.ctx.store_meta.lock().unwrap(); stats.set_region_count(meta.regions.len() as u32); } let snap_stats = self.ctx.snap_mgr.stats(); stats.set_sending_snap_count(snap_stats.sending_count as u32); stats.set_receiving_snap_count(snap_stats.receiving_count as u32); STORE_SNAPSHOT_TRAFFIC_GAUGE_VEC .with_label_values(&["sending"]) .set(snap_stats.sending_count as i64); STORE_SNAPSHOT_TRAFFIC_GAUGE_VEC .with_label_values(&["receiving"]) .set(snap_stats.receiving_count as i64); let apply_snapshot_count = self.ctx.applying_snap_count.load(Ordering::SeqCst); stats.set_applying_snap_count(apply_snapshot_count as u32); STORE_SNAPSHOT_TRAFFIC_GAUGE_VEC .with_label_values(&["applying"]) .set(apply_snapshot_count as i64); stats.set_start_time(self.fsm.store.start_time.unwrap().sec as u32); // report store write flow to pd stats.set_bytes_written( self.ctx .global_stat .stat .engine_total_bytes_written .swap(0, Ordering::SeqCst), ); stats.set_keys_written( self.ctx .global_stat .stat .engine_total_keys_written .swap(0, Ordering::SeqCst), ); stats.set_is_busy( self.ctx .global_stat .stat .is_busy .swap(false, Ordering::SeqCst), ); let store_info = StoreInfo { engine: Arc::clone(&self.ctx.engines.kv), capacity: self.ctx.cfg.capacity.0, }; let task = PdTask::StoreHeartbeat { stats, store_info }; if let Err(e) = self.ctx.pd_scheduler.schedule(task) { error!("notify pd failed"; "store_id" => self.fsm.store.id, "err" => ?e ); } } fn on_pd_store_heartbeat_tick(&mut self) { self.store_heartbeat_pd(); self.register_pd_store_heartbeat_tick(); } fn handle_snap_mgr_gc(&mut self) -> Result<()> { let snap_keys = self.ctx.snap_mgr.list_idle_snap()?; if snap_keys.is_empty() { return Ok(()); } let (mut last_region_id, mut keys) = (0, vec![]); let schedule_gc_snap = |region_id: u64, snaps| -> Result<()> { debug!( "schedule snap gc"; "store_id" => self.fsm.store.id, "region_id" => region_id, ); let gc_snap = PeerMsg::CasualMessage(CasualMessage::GcSnap { snaps }); match self.ctx.router.send(region_id, gc_snap) { Ok(()) => Ok(()), Err(TrySendError::Disconnected(PeerMsg::CasualMessage( CasualMessage::GcSnap { snaps }, ))) => { // The snapshot exists because MsgAppend has been rejected. So the // peer must have been exist. But now it's disconnected, so the peer // has to be destroyed instead of being created. info!( "region is disconnected, remove snaps"; "region_id" => region_id, "snaps" => ?snaps, ); for (key, is_sending) in snaps { let snap = if is_sending { self.ctx.snap_mgr.get_snapshot_for_sending(&key)? } else { self.ctx.snap_mgr.get_snapshot_for_applying(&key)? 
}; self.ctx .snap_mgr .delete_snapshot(&key, snap.as_ref(), false); } Ok(()) } Err(TrySendError::Full(_)) => Ok(()), Err(TrySendError::Disconnected(_)) => unreachable!(), } }; for (key, is_sending) in snap_keys { if last_region_id == key.region_id { keys.push((key, is_sending)); continue; } if !keys.is_empty() { schedule_gc_snap(last_region_id, keys)?; keys = vec![]; } last_region_id = key.region_id; keys.push((key, is_sending)); } if !keys.is_empty() { schedule_gc_snap(last_region_id, keys)?; } Ok(()) } fn on_snap_mgr_gc(&mut self) { if let Err(e) = self.handle_snap_mgr_gc() { error!( "handle gc snap failed"; "store_id" => self.fsm.store.id, "err" => ?e ); } self.register_snap_mgr_gc_tick(); } fn on_compact_lock_cf(&mut self) { // Create a compact lock cf task(compact whole range) and schedule directly. let lock_cf_bytes_written = self .ctx .global_stat .stat .lock_cf_bytes_written .load(Ordering::SeqCst); if lock_cf_bytes_written > self.ctx.cfg.lock_cf_compact_bytes_threshold.0 { self.ctx .global_stat .stat .lock_cf_bytes_written .fetch_sub(lock_cf_bytes_written, Ordering::SeqCst); let task = CompactTask::Compact { cf_name: String::from(CF_LOCK), start_key: None, end_key: None, }; if let Err(e) = self .ctx .cleanup_scheduler .schedule(CleanupTask::Compact(task)) { error!( "schedule compact lock cf task failed"; "store_id" => self.fsm.store.id, "err" => ?e, ); } } self.register_compact_lock_cf_tick(); } fn register_pd_store_heartbeat_tick(&self) { self.ctx.schedule_store_tick( StoreTick::PdStoreHeartbeat, self.ctx.cfg.pd_store_heartbeat_tick_interval.0, ); } fn register_snap_mgr_gc_tick(&self) { self.ctx .schedule_store_tick(StoreTick::SnapGc, self.ctx.cfg.snap_mgr_gc_tick_interval.0) } fn register_compact_lock_cf_tick(&self) { self.ctx.schedule_store_tick( StoreTick::CompactLockCf, self.ctx.cfg.lock_cf_compact_interval.0, ) } } impl<'a, T: Transport, C: PdClient> StoreFsmDelegate<'a, T, C> { fn on_validate_sst_result(&mut self, ssts: Vec<SstMeta>) { if ssts.is_empty() { return; } // A stale peer can still ingest a stale SST before it is // destroyed. We need to make sure that no stale peer exists. let mut delete_ssts = Vec::new(); { let meta = self.ctx.store_meta.lock().unwrap(); for sst in ssts { if !meta.regions.contains_key(&sst.get_region_id()) { delete_ssts.push(sst); } } } if delete_ssts.is_empty() { return; } let task = CleanupSSTTask::DeleteSST { ssts: delete_ssts }; if let Err(e) = self .ctx .cleanup_scheduler .schedule(CleanupTask::CleanupSST(task)) { error!( "schedule to delete ssts failed"; "store_id" => self.fsm.store.id, "err" => ?e, ); } } fn on_cleanup_import_sst(&mut self) -> Result<()> { let mut delete_ssts = Vec::new(); let mut validate_ssts = Vec::new(); let ssts = box_try!(self.ctx.importer.list_ssts()); if ssts.is_empty() { return Ok(()); } { let meta = self.ctx.store_meta.lock().unwrap(); for sst in ssts { if let Some(r) = meta.regions.get(&sst.get_region_id()) { let region_epoch = r.get_region_epoch(); if util::is_epoch_stale(sst.get_region_epoch(), region_epoch) { // If the SST epoch is stale, it will not be ingested anymore. delete_ssts.push(sst); } } else { // If the peer doesn't exist, we need to validate the SST through PD. 
validate_ssts.push(sst); } } } if !delete_ssts.is_empty() { let task = CleanupSSTTask::DeleteSST { ssts: delete_ssts }; if let Err(e) = self .ctx .cleanup_scheduler .schedule(CleanupTask::CleanupSST(task)) { error!( "schedule to delete ssts failed"; "store_id" => self.fsm.store.id, "err" => ?e ); } } if !validate_ssts.is_empty() { let task = CleanupSSTTask::ValidateSST { ssts: validate_ssts, }; if let Err(e) = self .ctx .cleanup_scheduler .schedule(CleanupTask::CleanupSST(task)) { error!( "schedule to validate ssts failed"; "store_id" => self.fsm.store.id, "err" => ?e, ); } } Ok(()) } fn register_consistency_check_tick(&mut self) { self.ctx.schedule_store_tick( StoreTick::ConsistencyCheck, self.ctx.cfg.consistency_check_interval.0, ) } fn on_consistency_check_tick(&mut self) { self.register_consistency_check_tick(); if self.ctx.consistency_check_scheduler.is_busy() { return; } let (mut target_region_id, mut oldest) = (0, Instant::now()); let target_peer = { let meta = self.ctx.store_meta.lock().unwrap(); for region_id in meta.regions.keys() { match self.fsm.store.consistency_check_time.get(region_id) { Some(time) => { if *time < oldest { oldest = *time; target_region_id = *region_id; } } None => { target_region_id = *region_id; break; } } } if target_region_id == 0 { return; } match util::find_peer(&meta.regions[&target_region_id], self.ctx.store_id()) { None => return, Some(p) => p.clone(), } }; info!( "scheduling consistency check for region"; "store_id" => self.fsm.store.id, "region_id" => target_region_id, ); self.fsm .store .consistency_check_time .insert(target_region_id, Instant::now()); let mut request = new_admin_request(target_region_id, target_peer); let mut admin = AdminRequest::default(); admin.set_cmd_type(AdminCmdType::ComputeHash); request.set_admin_request(admin); let _ = self.ctx.router.send( target_region_id, PeerMsg::RaftCommand(RaftCommand::new(request, Callback::None)), ); } fn on_cleanup_import_sst_tick(&mut self) { if let Err(e) = self.on_cleanup_import_sst() { error!( "cleanup import sst failed"; "store_id" => self.fsm.store.id, "err" => ?e, ); } self.register_cleanup_import_sst_tick(); } fn register_cleanup_import_sst_tick(&self) { self.ctx.schedule_store_tick( StoreTick::CleanupImportSST, self.ctx.cfg.cleanup_import_sst_interval.0, ) } fn clear_region_size_in_range(&mut self, start_key: &[u8], end_key: &[u8]) { let start_key = data_key(start_key); let end_key = data_end_key(end_key); let mut regions = vec![]; { let meta = self.ctx.store_meta.lock().unwrap(); for (_, region_id) in meta .region_ranges .range((Excluded(start_key), Included(end_key))) { regions.push(*region_id); } } for region_id in regions { let _ = self.ctx.router.send( region_id, PeerMsg::CasualMessage(CasualMessage::ClearRegionSize), ); } } fn on_store_unreachable(&mut self, store_id: u64) { let now = Instant::now(); if self .fsm .store .last_unreachable_report .get(&store_id) .map_or(UNREACHABLE_BACKOFF, |t| now.duration_since(*t)) < UNREACHABLE_BACKOFF { return; } info!( "broadcasting unreachable"; "store_id" => self.fsm.store.id, "unreachable_store_id" => store_id, ); self.fsm.store.last_unreachable_report.insert(store_id, now); // It's possible to acquire the lock and only send notification to // involved regions. However loop over all the regions can take a // lot of time, which may block other operations. 
        self.ctx.router.report_unreachable(store_id);
    }
}

fn size_change_filter(info: &CompactionJobInfo) -> bool {
    // When calculating region size, we only consider write and default
    // column families.
    let cf = info.cf_name();
    if cf != CF_WRITE && cf != CF_DEFAULT {
        return false;
    }
    // Compactions in level 0 and level 1 happen very frequently.
    if info.output_level() < 2 {
        return false;
    }

    true
}

pub fn new_compaction_listener(ch: RaftRouter) -> CompactionListener {
    let ch = Mutex::new(ch);
    let compacted_handler = Box::new(move |compacted_event: CompactedEvent| {
        let ch = ch.lock().unwrap();
        if let Err(e) = ch.send_control(StoreMsg::CompactedEvent(compacted_event)) {
            error!(
                "send compaction finished event to raftstore failed";
                "err" => ?e,
            );
        }
    });
    CompactionListener::new(compacted_handler, Some(size_change_filter))
}

fn calc_region_declined_bytes(
    event: CompactedEvent,
    region_ranges: &BTreeMap<Key, u64>,
    bytes_threshold: u64,
) -> Vec<(u64, u64)> {
    // Calculate the influenced regions.
    let mut influenced_regions = vec![];
    for (end_key, region_id) in
        region_ranges.range((Excluded(event.start_key), Included(event.end_key.clone())))
    {
        influenced_regions.push((region_id, end_key.clone()));
    }
    if let Some((end_key, region_id)) = region_ranges
        .range((Included(event.end_key), Unbounded))
        .next()
    {
        influenced_regions.push((region_id, end_key.clone()));
    }

    // Calculate declined bytes for each region.
    // The `end_key`s in `influenced_regions` are in increasing order.
    let mut region_declined_bytes = vec![];
    let mut last_end_key: Vec<u8> = vec![];
    for (region_id, end_key) in influenced_regions {
        let mut old_size = 0;
        for prop in &event.input_props {
            old_size += prop.get_approximate_size_in_range(&last_end_key, &end_key);
        }
        let mut new_size = 0;
        for prop in &event.output_props {
            new_size += prop.get_approximate_size_in_range(&last_end_key, &end_key);
        }
        last_end_key = end_key;

        // Filter out trivial declines for better performance.
        if old_size > new_size && old_size - new_size > bytes_threshold {
            region_declined_bytes.push((*region_id, old_size - new_size));
        }
    }

    region_declined_bytes
}

// Check whether the range is covered by existing regions.
fn is_range_covered<'a, F: Fn(u64) -> &'a metapb::Region>( region_ranges: &BTreeMap<Key, u64>, get_region: F, mut start: Vec<u8>, end: Vec<u8>, ) -> bool { for (end_key, &id) in region_ranges.range((Excluded(start.clone()), Unbounded::<Key>)) { let region = get_region(id); // find a missing range if start < enc_start_key(region) { return false; } if *end_key >= end { return true; } start = end_key.clone(); } false } #[cfg(test)] mod tests { use std::collections::BTreeMap; use crate::raftstore::coprocessor::properties::{RangeOffsets, RangeProperties}; use crate::storage::kv::CompactedEvent; use tikv_util::collections::HashMap; use super::*; #[test] fn test_calc_region_declined_bytes() { let prop = RangeProperties { offsets: vec![ ( b"a".to_vec(), RangeOffsets { size: 4 * 1024, keys: 1, }, ), ( b"b".to_vec(), RangeOffsets { size: 8 * 1024, keys: 2, }, ), ( b"c".to_vec(), RangeOffsets { size: 12 * 1024, keys: 3, }, ), ], }; let event = CompactedEvent { cf: "default".to_owned(), output_level: 3, total_input_bytes: 12 * 1024, total_output_bytes: 0, start_key: prop.smallest_key().unwrap(), end_key: prop.largest_key().unwrap(), input_props: vec![prop], output_props: vec![], }; let mut region_ranges = BTreeMap::new(); region_ranges.insert(b"a".to_vec(), 1); region_ranges.insert(b"b".to_vec(), 2); region_ranges.insert(b"c".to_vec(), 3); let declined_bytes = calc_region_declined_bytes(event, &region_ranges, 1024); let expected_declined_bytes = vec![(2, 8192), (3, 4096)]; assert_eq!(declined_bytes, expected_declined_bytes); } #[test] fn test_is_range_covered() { let meta = vec![(b"b", b"d"), (b"d", b"e"), (b"e", b"f"), (b"f", b"h")]; let mut region_ranges = BTreeMap::new(); let mut region_peers = HashMap::default(); { for (i, (start, end)) in meta.into_iter().enumerate() { let mut region = metapb::Region::default(); let peer = metapb::Peer::default(); region.set_peers(vec![peer].into()); region.set_start_key(start.to_vec()); region.set_end_key(end.to_vec()); region_ranges.insert(enc_end_key(&region), i as u64); region_peers.insert(i as u64, region); } let check_range = |start: &[u8], end: &[u8]| { is_range_covered( &region_ranges, |id: u64| &region_peers[&id], data_key(start), data_end_key(end), ) }; assert!(!check_range(b"a", b"c")); assert!(check_range(b"b", b"d")); assert!(check_range(b"b", b"e")); assert!(check_range(b"e", b"f")); assert!(check_range(b"b", b"g")); assert!(check_range(b"e", b"h")); assert!(!check_range(b"e", b"n")); assert!(!check_range(b"g", b"n")); assert!(!check_range(b"o", b"z")); assert!(!check_range(b"", b"")); } let meta = vec![(b"b", b"d"), (b"e", b"f"), (b"f", b"h")]; region_ranges.clear(); region_peers.clear(); { for (i, (start, end)) in meta.into_iter().enumerate() { let mut region = metapb::Region::default(); let peer = metapb::Peer::default(); region.set_peers(vec![peer].into()); region.set_start_key(start.to_vec()); region.set_end_key(end.to_vec()); region_ranges.insert(enc_end_key(&region), i as u64); region_peers.insert(i as u64, region); } let check_range = |start: &[u8], end: &[u8]| { is_range_covered( &region_ranges, |id: u64| &region_peers[&id], data_key(start), data_end_key(end), ) }; assert!(!check_range(b"a", b"c")); assert!(check_range(b"b", b"d")); assert!(!check_range(b"b", b"e")); assert!(check_range(b"e", b"f")); assert!(!check_range(b"b", b"g")); assert!(check_range(b"e", b"g")); assert!(check_range(b"e", b"h")); assert!(!check_range(b"e", b"n")); assert!(!check_range(b"g", b"n")); assert!(!check_range(b"o", b"z")); assert!(!check_range(b"", b"")); } } 
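    // Illustrative addition (not from the original file): the
    // `bytes_threshold` argument of `calc_region_declined_bytes` drops any
    // region whose size decline is at or below the threshold, so a threshold
    // larger than every possible decline yields an empty result. This mirrors
    // the setup of `test_calc_region_declined_bytes` above, where the largest
    // per-region decline is 8192 bytes.
    #[test]
    fn test_calc_region_declined_bytes_filtered_by_threshold() {
        let prop = RangeProperties {
            offsets: vec![
                (
                    b"a".to_vec(),
                    RangeOffsets {
                        size: 4 * 1024,
                        keys: 1,
                    },
                ),
                (
                    b"b".to_vec(),
                    RangeOffsets {
                        size: 8 * 1024,
                        keys: 2,
                    },
                ),
                (
                    b"c".to_vec(),
                    RangeOffsets {
                        size: 12 * 1024,
                        keys: 3,
                    },
                ),
            ],
        };
        let event = CompactedEvent {
            cf: "default".to_owned(),
            output_level: 3,
            total_input_bytes: 12 * 1024,
            total_output_bytes: 0,
            start_key: prop.smallest_key().unwrap(),
            end_key: prop.largest_key().unwrap(),
            input_props: vec![prop],
            output_props: vec![],
        };

        let mut region_ranges = BTreeMap::new();
        region_ranges.insert(b"a".to_vec(), 1);
        region_ranges.insert(b"b".to_vec(), 2);
        region_ranges.insert(b"c".to_vec(), 3);

        // A 16 KiB threshold exceeds the largest decline (8 KiB), so every
        // region is filtered out.
        let declined_bytes = calc_region_declined_bytes(event, &region_ranges, 16 * 1024);
        assert!(declined_bytes.is_empty());
    }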
}
36.314688
121
0.551831
480b0cd1bc5a05b14cf2ddd76d518dc04a3e8f83
12,012
use futures::{self, Async};
use serde_derive::{Deserialize, Serialize};
use base64;
use actix_web::{
    HttpResponse,
    http::header::HeaderMap,
};
use serde_json::{Value, map::Map};
use bytes::Bytes;
use std::str::FromStr;

#[macro_use]
pub mod app;
pub mod image;

mod app_tests;
mod tests;

/// To shorten file names, a HEX-encoded name is converted to URL-safe base64.
/// To keep a directory listing readable when it may contain very many files,
/// the function automatically derives subdirectory names from the first three
/// characters of the file name.
/// #Example:
/// i2D7SQIQYCYlr9t4JMXgFw.png -> i/2/D/7SQIQYCYlr9t4JMXgFw.png
/// Usage example:
/// let (
///     src_path,   // full path to the file
///     src_subdir  // path to the file's directory
/// ) = utils::get_file_path_by_hex("6568c9e9c35a7fa06f236e89f704d8c9b47183a24f2c978dba6c92e2747e3a13", ".png");
pub fn get_file_path_by_hex(hex_name: &str, extension: &str) -> (std::path::PathBuf, std::path::PathBuf) {
    let src_name = format!("{}{}", hex_to_base64(hex_name).trim_end_matches('='), extension);
    let target_subdir = std::path::PathBuf::from_str(&src_name[..1])
        .unwrap()
        .join(&src_name[1..2])
        .join(&src_name[2..3]);

    (target_subdir.join(&src_name[3..]), target_subdir)
}

/// Converts a hash sum from its hex representation to base64.
#[inline]
pub fn hex_to_base64(hex: &str) -> String {
    let mut bytes = Vec::new();
    let len = hex.len();
    let mut i = 0;
    let mut next = 2;
    while i < len {
        match u8::from_str_radix(&hex[i..next], 16) {
            Ok(v) => bytes.push(v),
            Err(e) => eprintln!("Problem with hex: {}", e),
        }
        i = next;
        next += 2;
    }

    base64::encode_config(&bytes, base64::URL_SAFE)
}

pub const URL_DATA_PREFIX: &'static str = "data:";

/// Checks whether a URL carries inline (embedded) content.
#[inline]
pub fn is_url_data(input: &str) -> bool {
    input.starts_with(URL_DATA_PREFIX)
}

/// Converts a URL string with inline content into an ImageInput.
pub fn data_url_parse_inner(input: String) -> Result<ImageInput, ImageError> {
    let start_ptr = input.as_ptr() as usize;
    let end_ptr = start_ptr + input.len();

    let type_start = URL_DATA_PREFIX.as_bytes().len();
    let type_start_ptr = start_ptr + type_start;
    let mut offset_ptr = type_start_ptr;

    let mut type_length: usize = 0;
    while offset_ptr < end_ptr {
        if b';' == unsafe { *(offset_ptr as *const u8) } {
            type_length = offset_ptr - type_start_ptr;
            offset_ptr += 1;
            break;
        }
        offset_ptr += 1;
    }
    if type_length < 1 {
        return Err(ImageError::Invalid);
    }

    let mut content_start = 0;
    while offset_ptr < end_ptr {
        if b',' == unsafe { *(offset_ptr as *const u8) } {
            offset_ptr += 1;
            content_start = offset_ptr - start_ptr;
            break;
        }
        offset_ptr += 1;
    }
    if content_start < 1 {
        return Err(ImageError::Invalid);
    }

    if let Ok(mime_type_source) = std::str::from_utf8(unsafe {
        std::slice::from_raw_parts(type_start_ptr as *const u8, type_length)
    }) {
        if let Ok(mime_type) = mime_type_source.parse::<mime::Mime>() {
            return Ok(ImageInput {
                name: None,
                r#type: mime_type,
                content: Bytes::from(input).slice_from(content_start),
            });
        }
    }

    Err(ImageError::Invalid)
}

/// Converts a URL string with inline content into an ImageInput.
#[inline]
pub fn data_url_parse(input: String) -> Result<ImageInput, ImageError> {
    if is_url_data(&input) {
        data_url_parse_inner(input)
    } else {
        Err(ImageError::Invalid)
    }
}

/// Converts a JSON object into an ImageInput.
#[inline]
pub fn json_value_to_upload_image_item(obj: &mut Map<String, Value>) -> Result<ImageInput, ImageError> {
    let name = match obj.remove("name") {
        Some(Value::String(name)) => Some(name),
        _ => None,
    };
    if let Some(Value::String(mime_type)) = obj.remove("type") {
        if let Ok(mime_type) = mime_type.parse::<mime::Mime>() {
            if let Some(Value::String(content)) = obj.remove("content") {
                return Ok(ImageInput {
                    name,
                    r#type: mime_type,
                    content: Bytes::from(content),
                });
            }
        }
    }

    Err(ImageError::Invalid)
}

#[inline]
pub fn response_bad_request() -> HttpResponse {
    HttpResponse::BadRequest().json(ImageError::Invalid.get_serializable())
}

#[inline]
pub fn response_payload_too_large() -> HttpResponse {
    HttpResponse::PayloadTooLarge().json(ImageError::Overflow.get_serializable())
}

#[inline]
pub fn response_unsupported_media_type() -> HttpResponse {
    HttpResponse::UnsupportedMediaType().json(ImageError::Unsupported.get_serializable())
}

/// Extracts the numeric value of the content-length header, provided it does
/// not exceed max_size; otherwise returns an error.
#[inline]
pub fn get_content_length(headers: &HeaderMap, mut max_size: u64) -> Result<u64, ()> {
    if let Some(v) = headers.get("content-length") {
        if let Ok(v) = v.to_str() {
            if let Ok(size) = u64::from_str_radix(v, 10) {
                if size > max_size {
                    return Err(());
                }
                max_size = size;
            }
        }
    }

    Ok(max_size)
}

/// Converts a result into an HttpResponse.
#[inline]
pub fn to_response<A, E>(r: Result<A, E>) -> Result<HttpResponse, actix_web::Error>
where
    A: serde::Serialize,
    E: std::fmt::Debug,
{
    Ok(match r {
        Ok(value) => HttpResponse::Ok().json(value),
        Err(err) => {
            eprintln!("Error: {:?}", err);
            response_bad_request()
        }
    })
}

/// Input data structure for an image object.
#[derive(PartialEq, Debug, Clone)]
pub struct ImageInput {
    pub name: Option<String>,
    pub r#type: mime::Mime,
    pub content: Bytes, // base64 content
}

/// Possible image-processing errors.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
#[serde(tag = "type")]
pub enum ImageError {
    Invalid,
    Overflow,
    Unsupported,
    NetError,
    WriteError,
}

impl ImageError {
    pub fn get_serializable(self) -> ImageErrorWrapper {
        ImageErrorWrapper { error: self }
    }
}

#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct ImageErrorWrapper {
    pub error: ImageError,
}

/// Output data structure for an image object.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct ImageOutput {
    pub name: std::path::PathBuf,
    pub size: u64,
}

/// Output data structure for an image object.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct ImageOutputItem {
    pub original: Option<std::path::PathBuf>,
    pub name: std::path::PathBuf,
    pub size: u64,
}

// StreamToSink

#[derive(Debug)]
pub struct StreamToSink<Stream, Sink, Encryptor> {
    state: Option<(Stream, Sink, Encryptor)>,
    bytes: Option<bytes::Bytes>,
    amt: u64,
    read_done: bool,
    size_limit: u64,
}

/// Streams data while hashing it and enforcing a limit on the input length.
pub fn stream_to_sink<Stream, Sink, Encryptor>(
    stream: Stream,
    sink: Sink,
    encryptor: Encryptor,
    size_limit: u64,
) -> StreamToSink<Stream, Sink, Encryptor>
where
    Stream: futures::stream::Stream<Item = bytes::Bytes, Error = std::io::Error>,
    Sink: futures::sink::Sink<SinkItem = bytes::Bytes, SinkError = std::io::Error>,
    Encryptor: crypto::digest::Digest,
{
    StreamToSink {
        state: Some((stream, sink, encryptor)),
        bytes: None,
        amt: 0,
        read_done: false,
        size_limit,
    }
}

impl<Stream, Sink, Encryptor> futures::Future for StreamToSink<Stream, Sink, Encryptor>
where
    Stream: futures::stream::Stream<Item = bytes::Bytes, Error = std::io::Error>,
    Sink: futures::sink::Sink<SinkItem = bytes::Bytes, SinkError = std::io::Error>,
    Encryptor: crypto::digest::Digest,
{
    type Item = (Stream, Sink, Encryptor, u64);
    type Error = std::io::Error;

    fn poll(&mut self) -> futures::Poll<Self::Item, Self::Error> {
        let (stream, sink, encryptor) = self.state.as_mut().unwrap();
        loop {
            try_ready!(sink.poll_complete());
            if self.read_done {
                if self.bytes == None {
                    let (stream, sink, encryptor) = self.state.take().unwrap();
                    return Ok(Async::Ready((stream, sink, encryptor, self.amt)));
                }
            } else {
                if self.bytes == None {
                    match stream.poll() {
                        Ok(async_value) => {
                            match async_value {
                                Async::Ready(bytes_option) => {
                                    match bytes_option {
                                        Some(bytes) => {
                                            self.amt += bytes.len() as u64;
                                            if self.amt > self.size_limit {
                                                return Err(std::io::Error::new(
                                                    std::io::ErrorKind::UnexpectedEof,
                                                    "File size limited",
                                                ));
                                            }
                                            encryptor.input(&bytes[..]);
                                            self.bytes = Some(bytes);
                                        },
                                        None => {
                                            self.read_done = true;
                                        }
                                    }
                                },
                                Async::NotReady => {
                                    return Ok(Async::NotReady);
                                }
                            }
                        },
                        Err(e) => {
                            return Err(e);
                        }
                    }
                }
            }

            if let Some(_) = self.bytes {
                match sink.start_send(self.bytes.take().unwrap()) {
                    Ok(async_sink) => {
                        if let futures::AsyncSink::NotReady(bytes) = async_sink {
                            self.bytes = Some(bytes);
                            return Ok(Async::NotReady);
                        }
                    },
                    Err(e) => {
                        return Err(e);
                    }
                }
            }
        }
    }
}

#[macro_export]
macro_rules! boxed {
    ($value:expr) => (
        {
            Box::new($value) as Box<dyn Future<Item = HttpResponse, Error = actix_web::Error>>
        }
    );
}

#[macro_export]
macro_rules! future_wrap {
    ($value:expr) => (
        {
            boxed!(futures::future::ok::<_, actix_web::Error>($value))
        }
    );
}

const SIGN_FF_D8_FF: &'static [u8] = &[0xFF, 0xD8, 0xFF]; // JPG
const SIGN_89_50_4E_47_0D_0A_1A_0A: &'static [u8] =
    &[0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A]; // PNG
const SIGN_42_4D: &'static [u8] = &[0x42, 0x4D]; // BMP

/// Returns the content-type matching the image signature found in the first
/// bytes of the file.
#[allow(dead_code)]
#[inline]
pub fn get_content_type_by_signature(src: &[u8]) -> Option<&'static str> {
    if src.len() > 3 && src.starts_with(SIGN_FF_D8_FF) {
        // JPG
        if match src[3] {
            0xE8 | 0xE3 | 0xE2 | 0xE1 | 0xE0 => true,
            _ => false,
        } {
            return Some("image/jpeg");
        }
    }
    if src.starts_with(SIGN_89_50_4E_47_0D_0A_1A_0A) {
        return Some("image/png");
    }
    if src.starts_with(SIGN_42_4D) {
        return Some("image/bmp");
    }

    None
}
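
// Illustrative examples (not part of the original module): small checks of
// the deterministic helpers above. The expected values follow directly from
// the definitions: hex "ff" is the single byte 0xFF, whose URL-safe base64
// encoding is "_w==", and hex "000000" encodes to "AAAA", which the sharding
// scheme splits into the subdirectory "A/A/A".
#[cfg(test)]
mod doc_examples {
    use super::*;

    #[test]
    fn hex_to_base64_matches_known_values() {
        assert_eq!(hex_to_base64("ff"), "_w==");
        assert_eq!(hex_to_base64("000000"), "AAAA");
    }

    #[test]
    fn file_path_is_sharded_by_first_three_chars() {
        let (path, subdir) = get_file_path_by_hex("000000", ".png");
        assert_eq!(subdir, std::path::PathBuf::from("A/A/A"));
        assert_eq!(path, std::path::PathBuf::from("A/A/A/A.png"));
    }

    #[test]
    fn data_url_is_split_into_mime_type_and_content() {
        let image = data_url_parse("data:image/png;base64,AAAA".to_owned()).unwrap();
        assert_eq!(image.r#type, mime::IMAGE_PNG);
        assert_eq!(&image.content[..], &b"AAAA"[..]);
        assert_eq!(image.name, None);
    }

    #[test]
    fn png_signature_is_recognized() {
        assert_eq!(
            get_content_type_by_signature(&[0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A]),
            Some("image/png")
        );
    }
}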
30.48731
136
0.548535
f786371d1efdbb82a6f79b3bd2867d403e59a6ff
1,140
// Copyright 2018-2022 Parity Technologies (UK) Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

pub mod ext;
pub mod test_api;

mod chain_extension;
mod database;
mod exec_context;
mod hashing;
mod types;

#[cfg(test)]
mod tests;

pub use chain_extension::ChainExtension;
pub use types::AccountError;

use derive_more::From;

/// Errors which can happen when interacting with this crate.
#[derive(Debug, From, PartialEq, Eq)]
pub enum Error {
    Account(AccountError),
    #[from(ignore)]
    UninitializedBlocks,
    #[from(ignore)]
    UninitializedExecutionContext,
    #[from(ignore)]
    UnregisteredChainExtension,
}
26.511628
75
0.737719
219335dbe6ff7bb09b95c0283ea2f90cee1ad56d
3,382
//! SeaORM Entity. Generated by sea-orm-codegen 0.8.0

use sea_orm::entity::prelude::*;

#[derive(Copy, Clone, Default, Debug, DeriveEntity)]
pub struct Entity;

impl EntityName for Entity {
    fn table_name(&self) -> &str {
        "poems"
    }
}

use async_graphql::*;

#[derive(Clone, Debug, PartialEq, DeriveModel, DeriveActiveModel, SimpleObject)]
#[graphql(concrete(name = "Poem", params()))]
pub struct Model {
    pub poem_uuid: Uuid,
    pub originator_uuid: Uuid,
    pub creation_ts: DateTimeWithTimeZone,
    pub set_uuid: Uuid,
    pub banter_uuid: Option<Uuid>,
    pub title: String,
    pub idx: i32,
    pub part_of_poetshuffle: bool,
    pub editor_uuid: Option<Uuid>,
    pub approved: Option<bool>,
}

#[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)]
pub enum Column {
    PoemUuid,
    OriginatorUuid,
    CreationTs,
    SetUuid,
    BanterUuid,
    Title,
    Idx,
    PartOfPoetshuffle,
    EditorUuid,
    Approved,
}

#[derive(Copy, Clone, Debug, EnumIter, DerivePrimaryKey)]
pub enum PrimaryKey {
    PoemUuid,
}

impl PrimaryKeyTrait for PrimaryKey {
    type ValueType = Uuid;

    fn auto_increment() -> bool {
        false
    }
}

#[derive(Copy, Clone, Debug, EnumIter)]
pub enum Relation {
    Banter,
    Users2,
    Users1,
    Sets,
    Comments,
}

impl ColumnTrait for Column {
    type EntityName = Entity;

    fn def(&self) -> ColumnDef {
        match self {
            Self::PoemUuid => ColumnType::Uuid.def(),
            Self::OriginatorUuid => ColumnType::Uuid.def(),
            Self::CreationTs => ColumnType::TimestampWithTimeZone.def(),
            Self::SetUuid => ColumnType::Uuid.def(),
            Self::BanterUuid => ColumnType::Uuid.def().null(),
            Self::Title => ColumnType::String(Some(100u32)).def(),
            Self::Idx => ColumnType::Integer.def(),
            Self::PartOfPoetshuffle => ColumnType::Boolean.def(),
            Self::EditorUuid => ColumnType::Uuid.def().null(),
            Self::Approved => ColumnType::Boolean.def().null(),
        }
    }
}

impl RelationTrait for Relation {
    fn def(&self) -> RelationDef {
        match self {
            Self::Banter => Entity::belongs_to(super::banters::Entity)
                .from(Column::BanterUuid)
                .to(super::banters::Column::BanterUuid)
                .into(),
            Self::Users2 => Entity::belongs_to(super::users::Entity)
                .from(Column::EditorUuid)
                .to(super::users::Column::UserUuid)
                .into(),
            Self::Users1 => Entity::belongs_to(super::users::Entity)
                .from(Column::OriginatorUuid)
                .to(super::users::Column::UserUuid)
                .into(),
            Self::Sets => Entity::belongs_to(super::sets::Entity)
                .from(Column::SetUuid)
                .to(super::sets::Column::SetUuid)
                .into(),
            Self::Comments => Entity::has_many(super::comments::Entity).into(),
        }
    }
}

impl Related<super::banters::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::Banter.def()
    }
}

impl Related<super::sets::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::Sets.def()
    }
}

impl Related<super::comments::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::Comments.def()
    }
}

impl ActiveModelBehavior for ActiveModel {}
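
// Illustrative only (not emitted by sea-orm-codegen): a typical fluent query
// against this entity with sea-orm 0.8. The function name is hypothetical;
// `db` is any live `DatabaseConnection`. It fetches the approved poems of a
// set in their `idx` order.
#[allow(dead_code)]
pub async fn example_approved_poems_in_set(
    db: &sea_orm::DatabaseConnection,
    set: Uuid,
) -> Result<Vec<Model>, sea_orm::DbErr> {
    use sea_orm::{ColumnTrait, EntityTrait, QueryFilter, QueryOrder};

    Entity::find()
        .filter(Column::SetUuid.eq(set))
        .filter(Column::Approved.eq(true))
        .order_by_asc(Column::Idx)
        .all(db)
        .await
}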
26.84127
80
0.590479
ff7269a67c1f4f68fe7470ecf67c94b6e11b9ede
1,283
/*
 * Copyright (c) 2021 gematik GmbH
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

use serde::{Deserialize, Serialize};

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Address {
    pub type_: Type,
    pub lines: Vec<Line>,
    pub city: Option<String>,
    pub zip_code: Option<String>,
    pub country: Option<String>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Type {
    Both,
    Postal,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Line {
    pub value: String,
    pub extensions: Vec<LineExtension>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum LineExtension {
    Street(String),
    Number(String),
    Addition(String),
    Postbox(String),
}
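
// Illustrative sketch (not part of the original crate): building a postal
// address with a structured street/number line, as the LineExtension
// variants suggest. The concrete address values are example data only.
#[cfg(test)]
mod construction_examples {
    use super::*;

    #[test]
    fn build_postal_address() {
        let addr = Address {
            type_: Type::Postal,
            lines: vec![Line {
                value: "Friedrichstr. 136".into(),
                extensions: vec![
                    LineExtension::Street("Friedrichstr.".into()),
                    LineExtension::Number("136".into()),
                ],
            }],
            city: Some("Berlin".into()),
            zip_code: Some("10117".into()),
            country: Some("D".into()),
        };

        assert_eq!(addr.type_, Type::Postal);
        assert_eq!(addr.lines.len(), 1);
    }
}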
26.729167
75
0.704599
f5b63ce71325dc7bc73a1cd4b5857e0e7d97208d
16,790
#[doc = "Register `INTENCLR` reader"] pub struct R(crate::R<INTENCLR_SPEC>); impl core::ops::Deref for R { type Target = crate::R<INTENCLR_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<INTENCLR_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<INTENCLR_SPEC>) -> Self { R(reader) } } #[doc = "Register `INTENCLR` writer"] pub struct W(crate::W<INTENCLR_SPEC>); impl core::ops::Deref for W { type Target = crate::W<INTENCLR_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<crate::W<INTENCLR_SPEC>> for W { #[inline(always)] fn from(writer: crate::W<INTENCLR_SPEC>) -> Self { W(writer) } } #[doc = "Write '1' to disable interrupt for STOPPED event\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum STOPPED_A { #[doc = "0: Read: Disabled"] DISABLED = 0, #[doc = "1: Read: Enabled"] ENABLED = 1, } impl From<STOPPED_A> for bool { #[inline(always)] fn from(variant: STOPPED_A) -> Self { variant as u8 != 0 } } #[doc = "Field `STOPPED` reader - Write '1' to disable interrupt for STOPPED event"] pub struct STOPPED_R(crate::FieldReader<bool, STOPPED_A>); impl STOPPED_R { pub(crate) fn new(bits: bool) -> Self { STOPPED_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> STOPPED_A { match self.bits { false => STOPPED_A::DISABLED, true => STOPPED_A::ENABLED, } } #[doc = "Checks if the value of the field is `DISABLED`"] #[inline(always)] pub fn is_disabled(&self) -> bool { **self == STOPPED_A::DISABLED } #[doc = "Checks if the value of the field is `ENABLED`"] #[inline(always)] pub fn is_enabled(&self) -> bool { **self == STOPPED_A::ENABLED } } impl core::ops::Deref for STOPPED_R { type Target = crate::FieldReader<bool, STOPPED_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Write '1' to disable interrupt for STOPPED event\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum STOPPED_AW { #[doc = "1: Disable"] CLEAR = 1, } impl From<STOPPED_AW> for bool { #[inline(always)] fn from(variant: STOPPED_AW) -> Self { variant as u8 != 0 } } #[doc = "Field `STOPPED` writer - Write '1' to disable interrupt for STOPPED event"] pub struct STOPPED_W<'a> { w: &'a mut W, } impl<'a> STOPPED_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: STOPPED_AW) -> &'a mut W { self.bit(variant.into()) } #[doc = "Disable"] #[inline(always)] pub fn clear(self) -> &'a mut W { self.variant(STOPPED_AW::CLEAR) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 1)) | ((value as u32 & 0x01) << 1); self.w } } #[doc = "Write '1' to disable interrupt for ENDRX event\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum ENDRX_A { #[doc = "0: Read: Disabled"] DISABLED = 0, #[doc = "1: Read: Enabled"] ENABLED = 1, } impl From<ENDRX_A> for bool { #[inline(always)] fn from(variant: ENDRX_A) -> Self { variant as u8 != 0 } } #[doc = "Field `ENDRX` reader - Write '1' to disable interrupt for ENDRX event"] pub struct ENDRX_R(crate::FieldReader<bool, ENDRX_A>); 
impl ENDRX_R { pub(crate) fn new(bits: bool) -> Self { ENDRX_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> ENDRX_A { match self.bits { false => ENDRX_A::DISABLED, true => ENDRX_A::ENABLED, } } #[doc = "Checks if the value of the field is `DISABLED`"] #[inline(always)] pub fn is_disabled(&self) -> bool { **self == ENDRX_A::DISABLED } #[doc = "Checks if the value of the field is `ENABLED`"] #[inline(always)] pub fn is_enabled(&self) -> bool { **self == ENDRX_A::ENABLED } } impl core::ops::Deref for ENDRX_R { type Target = crate::FieldReader<bool, ENDRX_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Write '1' to disable interrupt for ENDRX event\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum ENDRX_AW { #[doc = "1: Disable"] CLEAR = 1, } impl From<ENDRX_AW> for bool { #[inline(always)] fn from(variant: ENDRX_AW) -> Self { variant as u8 != 0 } } #[doc = "Field `ENDRX` writer - Write '1' to disable interrupt for ENDRX event"] pub struct ENDRX_W<'a> { w: &'a mut W, } impl<'a> ENDRX_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: ENDRX_AW) -> &'a mut W { self.bit(variant.into()) } #[doc = "Disable"] #[inline(always)] pub fn clear(self) -> &'a mut W { self.variant(ENDRX_AW::CLEAR) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 4)) | ((value as u32 & 0x01) << 4); self.w } } #[doc = "Write '1' to disable interrupt for END event\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum END_A { #[doc = "0: Read: Disabled"] DISABLED = 0, #[doc = "1: Read: Enabled"] ENABLED = 1, } impl From<END_A> for bool { #[inline(always)] fn from(variant: END_A) -> Self { variant as u8 != 0 } } #[doc = "Field `END` reader - Write '1' to disable interrupt for END event"] pub struct END_R(crate::FieldReader<bool, END_A>); impl END_R { pub(crate) fn new(bits: bool) -> Self { END_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> END_A { match self.bits { false => END_A::DISABLED, true => END_A::ENABLED, } } #[doc = "Checks if the value of the field is `DISABLED`"] #[inline(always)] pub fn is_disabled(&self) -> bool { **self == END_A::DISABLED } #[doc = "Checks if the value of the field is `ENABLED`"] #[inline(always)] pub fn is_enabled(&self) -> bool { **self == END_A::ENABLED } } impl core::ops::Deref for END_R { type Target = crate::FieldReader<bool, END_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Write '1' to disable interrupt for END event\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum END_AW { #[doc = "1: Disable"] CLEAR = 1, } impl From<END_AW> for bool { #[inline(always)] fn from(variant: END_AW) -> Self { variant as u8 != 0 } } #[doc = "Field `END` writer - Write '1' to disable interrupt for END event"] pub struct END_W<'a> { w: &'a mut W, } impl<'a> END_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: END_AW) -> &'a mut W { self.bit(variant.into()) } #[doc = "Disable"] #[inline(always)] pub fn clear(self) -> &'a mut W { 
        self.variant(END_AW::CLEAR)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 6)) | ((value as u32 & 0x01) << 6);
        self.w
    }
}
#[doc = "Write '1' to disable interrupt for ENDTX event\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ENDTX_A {
    #[doc = "0: Read: Disabled"]
    DISABLED = 0,
    #[doc = "1: Read: Enabled"]
    ENABLED = 1,
}
impl From<ENDTX_A> for bool {
    #[inline(always)]
    fn from(variant: ENDTX_A) -> Self {
        variant as u8 != 0
    }
}
#[doc = "Field `ENDTX` reader - Write '1' to disable interrupt for ENDTX event"]
pub struct ENDTX_R(crate::FieldReader<bool, ENDTX_A>);
impl ENDTX_R {
    pub(crate) fn new(bits: bool) -> Self {
        ENDTX_R(crate::FieldReader::new(bits))
    }
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> ENDTX_A {
        match self.bits {
            false => ENDTX_A::DISABLED,
            true => ENDTX_A::ENABLED,
        }
    }
    #[doc = "Checks if the value of the field is `DISABLED`"]
    #[inline(always)]
    pub fn is_disabled(&self) -> bool {
        **self == ENDTX_A::DISABLED
    }
    #[doc = "Checks if the value of the field is `ENABLED`"]
    #[inline(always)]
    pub fn is_enabled(&self) -> bool {
        **self == ENDTX_A::ENABLED
    }
}
impl core::ops::Deref for ENDTX_R {
    type Target = crate::FieldReader<bool, ENDTX_A>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Write '1' to disable interrupt for ENDTX event\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ENDTX_AW {
    #[doc = "1: Disable"]
    CLEAR = 1,
}
impl From<ENDTX_AW> for bool {
    #[inline(always)]
    fn from(variant: ENDTX_AW) -> Self {
        variant as u8 != 0
    }
}
#[doc = "Field `ENDTX` writer - Write '1' to disable interrupt for ENDTX event"]
pub struct ENDTX_W<'a> {
    w: &'a mut W,
}
impl<'a> ENDTX_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: ENDTX_AW) -> &'a mut W {
        self.bit(variant.into())
    }
    #[doc = "Disable"]
    #[inline(always)]
    pub fn clear(self) -> &'a mut W {
        self.variant(ENDTX_AW::CLEAR)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 8)) | ((value as u32 & 0x01) << 8);
        self.w
    }
}
#[doc = "Write '1' to disable interrupt for STARTED event\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum STARTED_A {
    #[doc = "0: Read: Disabled"]
    DISABLED = 0,
    #[doc = "1: Read: Enabled"]
    ENABLED = 1,
}
impl From<STARTED_A> for bool {
    #[inline(always)]
    fn from(variant: STARTED_A) -> Self {
        variant as u8 != 0
    }
}
#[doc = "Field `STARTED` reader - Write '1' to disable interrupt for STARTED event"]
pub struct STARTED_R(crate::FieldReader<bool, STARTED_A>);
impl STARTED_R {
    pub(crate) fn new(bits: bool) -> Self {
        STARTED_R(crate::FieldReader::new(bits))
    }
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> STARTED_A {
        match self.bits {
            false => STARTED_A::DISABLED,
            true => STARTED_A::ENABLED,
        }
    }
    #[doc = "Checks if the value of the field is `DISABLED`"]
    #[inline(always)]
    pub fn is_disabled(&self) -> bool {
        **self == STARTED_A::DISABLED
    }
    #[doc = "Checks if the value of the field is `ENABLED`"]
    #[inline(always)]
    pub fn is_enabled(&self) -> bool {
        **self == STARTED_A::ENABLED
    }
}
impl core::ops::Deref for STARTED_R {
    type Target = crate::FieldReader<bool, STARTED_A>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Write '1' to disable interrupt for STARTED event\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum STARTED_AW {
    #[doc = "1: Disable"]
    CLEAR = 1,
}
impl From<STARTED_AW> for bool {
    #[inline(always)]
    fn from(variant: STARTED_AW) -> Self {
        variant as u8 != 0
    }
}
#[doc = "Field `STARTED` writer - Write '1' to disable interrupt for STARTED event"]
pub struct STARTED_W<'a> {
    w: &'a mut W,
}
impl<'a> STARTED_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: STARTED_AW) -> &'a mut W {
        self.bit(variant.into())
    }
    #[doc = "Disable"]
    #[inline(always)]
    pub fn clear(self) -> &'a mut W {
        self.variant(STARTED_AW::CLEAR)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 19)) | ((value as u32 & 0x01) << 19);
        self.w
    }
}
impl R {
    #[doc = "Bit 1 - Write '1' to disable interrupt for STOPPED event"]
    #[inline(always)]
    pub fn stopped(&self) -> STOPPED_R {
        STOPPED_R::new(((self.bits >> 1) & 0x01) != 0)
    }
    #[doc = "Bit 4 - Write '1' to disable interrupt for ENDRX event"]
    #[inline(always)]
    pub fn endrx(&self) -> ENDRX_R {
        ENDRX_R::new(((self.bits >> 4) & 0x01) != 0)
    }
    #[doc = "Bit 6 - Write '1' to disable interrupt for END event"]
    #[inline(always)]
    pub fn end(&self) -> END_R {
        END_R::new(((self.bits >> 6) & 0x01) != 0)
    }
    #[doc = "Bit 8 - Write '1' to disable interrupt for ENDTX event"]
    #[inline(always)]
    pub fn endtx(&self) -> ENDTX_R {
        ENDTX_R::new(((self.bits >> 8) & 0x01) != 0)
    }
    #[doc = "Bit 19 - Write '1' to disable interrupt for STARTED event"]
    #[inline(always)]
    pub fn started(&self) -> STARTED_R {
        STARTED_R::new(((self.bits >> 19) & 0x01) != 0)
    }
}
impl W {
    #[doc = "Bit 1 - Write '1' to disable interrupt for STOPPED event"]
    #[inline(always)]
    pub fn stopped(&mut self) -> STOPPED_W {
        STOPPED_W { w: self }
    }
    #[doc = "Bit 4 - Write '1' to disable interrupt for ENDRX event"]
    #[inline(always)]
    pub fn endrx(&mut self) -> ENDRX_W {
        ENDRX_W { w: self }
    }
    #[doc = "Bit 6 - Write '1' to disable interrupt for END event"]
    #[inline(always)]
    pub fn end(&mut self) -> END_W {
        END_W { w: self }
    }
    #[doc = "Bit 8 - Write '1' to disable interrupt for ENDTX event"]
    #[inline(always)]
    pub fn endtx(&mut self) -> ENDTX_W {
        ENDTX_W { w: self }
    }
    #[doc = "Bit 19 - Write '1' to disable interrupt for STARTED event"]
    #[inline(always)]
    pub fn started(&mut self) -> STARTED_W {
        STARTED_W { w: self }
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.0.bits(bits);
        self
    }
}
#[doc = "Disable interrupt\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [intenclr](index.html) module"]
pub struct INTENCLR_SPEC;
impl crate::RegisterSpec for INTENCLR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [intenclr::R](R) reader structure"]
impl crate::Readable for INTENCLR_SPEC {
    type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [intenclr::W](W) writer structure"]
impl crate::Writable for INTENCLR_SPEC {
    type Writer = W;
}
#[doc = "`reset()` method sets INTENCLR to value 0"]
impl crate::Resettable for INTENCLR_SPEC {
    #[inline(always)]
    fn reset_value() -> Self::Ux {
        0
    }
}
29.611993
406
0.572126
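The INTENCLR fragment above is standard svd2rust output, so several interrupt enables can be cleared in one `write` call with chained field writers, and the reader side reports whether an enable is still set. A minimal sketch, assuming the generated module belongs to an nRF-style UARTE peripheral and that `uarte` is a handle to it (both names are guesses, not taken from the source):

// Hypothetical call site; `uarte` and its `intenclr` register field are assumed names.
// `write` starts from the reset value (0), so only the named bits end up set.
uarte.intenclr.write(|w| w.endtx().clear().started().clear());
// INTENCLR is also Readable here; reading back reports the current enables:
let endtx_still_enabled = uarte.intenclr.read().endtx().is_enabled();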
33311334cb6662043587bf8ce5226618dc14a767
4,220
use parity_codec::{Decode, Encode};
use primitives::H160;
use rstd::prelude::Vec;

pub type MemberId = u64;
pub type ProposalId = u64;

// token factory types
pub type TokenBalance = u128;
pub type TokenId = u32;

#[derive(Encode, Decode, Default, Clone, PartialEq)]
#[cfg_attr(feature = "std", derive(Debug))]
pub struct Token {
    pub id: TokenId,
    pub decimals: u16,
    pub symbol: Vec<u8>,
}

#[derive(Encode, Decode, Default, Clone, PartialEq)]
#[cfg_attr(feature = "std", derive(Debug))]
pub struct Limits {
    pub max_tx_value: u128,
    pub day_max_limit: u128,
    pub day_max_limit_for_one_address: u128,
    pub max_pending_tx_limit: u128,
    pub min_tx_value: u128,
}

// bridge types
#[derive(Encode, Decode, Clone)]
#[cfg_attr(feature = "std", derive(Debug))]
pub struct BridgeTransfer<Hash> {
    pub transfer_id: ProposalId,
    pub message_id: Hash,
    pub open: bool,
    pub votes: MemberId,
    pub kind: Kind,
}

#[derive(Encode, Decode, Clone, PartialEq)]
#[cfg_attr(feature = "std", derive(Debug))]
pub enum Status {
    Revoked,
    Pending,
    PauseTheBridge,
    ResumeTheBridge,
    UpdateValidatorSet,
    UpdateLimits,
    Deposit,
    Withdraw,
    Approved,
    Canceled,
    Confirmed,
}

#[derive(Encode, Decode, Clone, PartialEq)]
#[cfg_attr(feature = "std", derive(Debug))]
pub enum Kind {
    Transfer,
    Limits,
    Validator,
    Bridge,
}

#[derive(Encode, Decode, Clone)]
#[cfg_attr(feature = "std", derive(Debug))]
pub struct TransferMessage<AccountId, Hash> {
    pub message_id: Hash,
    pub eth_address: H160,
    pub substrate_address: AccountId,
    pub amount: TokenBalance,
    pub status: Status,
    pub action: Status,
}

#[derive(Encode, Decode, Clone)]
#[cfg_attr(feature = "std", derive(Debug))]
pub struct LimitMessage<Hash> {
    pub id: Hash,
    pub limits: Limits,
    pub status: Status,
}

#[derive(Encode, Decode, Clone)]
#[cfg_attr(feature = "std", derive(Debug))]
pub struct BridgeMessage<AccountId, Hash> {
    pub message_id: Hash,
    pub account: AccountId,
    pub action: Status,
    pub status: Status,
}

#[derive(Encode, Decode, Clone)]
#[cfg_attr(feature = "std", derive(Debug))]
pub struct ValidatorsMessage<AccountId, Hash> {
    pub message_id: Hash,
    pub quorum: u64,
    pub accounts: Vec<AccountId>,
    pub action: Status,
    pub status: Status,
}

impl<A, H> Default for TransferMessage<A, H>
where
    A: Default,
    H: Default,
{
    fn default() -> Self {
        TransferMessage {
            message_id: H::default(),
            eth_address: H160::default(),
            substrate_address: A::default(),
            amount: TokenBalance::default(),
            status: Status::Withdraw,
            action: Status::Withdraw,
        }
    }
}

impl<H> Default for LimitMessage<H>
where
    H: Default,
{
    fn default() -> Self {
        LimitMessage {
            id: H::default(),
            limits: Limits::default(),
            status: Status::UpdateLimits,
        }
    }
}

impl<A, H> Default for BridgeMessage<A, H>
where
    A: Default,
    H: Default,
{
    fn default() -> Self {
        BridgeMessage {
            message_id: H::default(),
            account: A::default(),
            action: Status::Revoked,
            status: Status::Revoked,
        }
    }
}

impl<A, H> Default for ValidatorsMessage<A, H>
where
    A: Default,
    H: Default,
{
    fn default() -> Self {
        ValidatorsMessage {
            message_id: H::default(),
            quorum: u64::default(),
            accounts: Vec::default(),
            action: Status::Revoked,
            status: Status::Revoked,
        }
    }
}

impl<H> Default for BridgeTransfer<H>
where
    H: Default,
{
    fn default() -> Self {
        BridgeTransfer {
            transfer_id: ProposalId::default(),
            message_id: H::default(),
            open: true,
            votes: MemberId::default(),
            kind: Kind::Transfer,
        }
    }
}

impl Limits {
    pub fn into_array(&self) -> [u128; 5] {
        [
            self.max_tx_value,
            self.day_max_limit,
            self.day_max_limit_for_one_address,
            self.max_pending_tx_limit,
            self.min_tx_value,
        ]
    }
}
22.094241
52
0.602133
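Every message struct above ships a `Default`, so test fixtures can start from one and override only the fields a case cares about. A small sketch, assuming `AccountId = u64` and `Hash = primitives::H256` purely for illustration (the runtime supplies the real types):

// Hypothetical concrete type parameters for a test fixture.
let mut msg: TransferMessage<u64, primitives::H256> = TransferMessage::default();
msg.amount = 500;
msg.status = Status::Deposit; // both status and action default to Status::Withdraw

let limits = Limits { max_tx_value: 100, ..Limits::default() };
assert_eq!(limits.into_array()[0], 100); // into_array preserves declaration order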
bf4ede791bde4230b57b7a2976ff75bd3cf25781
845
// Copyright © Spelldawn 2021-present

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

// https://www.apache.org/licenses/LICENSE-2.0

// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Contains the definitions for all cards in the game.

pub mod artifacts;
pub mod champion_spells;
pub mod decklists;
pub mod initialize;
pub mod minions;
pub mod overlord_spells;
pub mod projects;
pub mod schemes;
pub mod test_cards;
pub mod weapons;
31.296296
75
0.763314
143c593d15edb0d3c93fde3851d5a39632437629
2,629
use crate::api::{twitch::Channel, Twitch};
use crate::emotes;
use crate::injector;
use crate::irc;
use crate::message_log;
use crate::settings;
use crate::storage::Cache;
use anyhow::Result;

pub struct Builder {
    twitch: Twitch,
    pub(crate) message_log: message_log::MessageLog,
    pub(crate) cache_stream: injector::Stream<Cache>,
    pub(crate) cache: Option<Cache>,
    pub(crate) enabled_stream: settings::Stream<bool>,
    pub(crate) enabled: bool,
    pub(crate) emotes_enabled_stream: settings::Stream<bool>,
    pub(crate) emotes_enabled: bool,
}

impl Builder {
    pub async fn new(
        twitch: Twitch,
        injector: &injector::Injector,
        message_log: message_log::MessageLog,
        settings: settings::Settings,
    ) -> Result<Self> {
        let (cache_stream, cache) = injector.stream::<Cache>().await;

        let (enabled_stream, enabled) = settings.stream("enabled").or_default().await?;

        let (emotes_enabled_stream, emotes_enabled) =
            settings.stream("emotes-enabled").or_default().await?;

        message_log.enabled(enabled).await;

        Ok(Self {
            twitch,
            message_log,
            cache_stream,
            cache,
            enabled_stream,
            enabled,
            emotes_enabled_stream,
            emotes_enabled,
        })
    }

    /// Construct a new chat log with the specified configuration.
    pub fn build(&self) -> Result<Option<ChatLog>> {
        if !self.enabled {
            return Ok(None);
        }

        let emotes = match (self.emotes_enabled, self.cache.as_ref()) {
            (true, Some(cache)) => Some(emotes::Emotes::new(cache.clone(), self.twitch.clone())?),
            _ => None,
        };

        Ok(Some(ChatLog {
            message_log: self.message_log.clone(),
            emotes,
        }))
    }
}

#[derive(Clone)]
pub struct ChatLog {
    /// Log to add messages to.
    pub message_log: message_log::MessageLog,
    /// Handler of emotes.
    emotes: Option<emotes::Emotes>,
}

impl ChatLog {
    pub async fn observe(&self, tags: &irc::Tags, channel: &Channel, name: &str, message: &str) {
        let rendered = match self.emotes.as_ref() {
            Some(emotes) => match emotes.render(&tags, channel, name, message).await {
                Ok(rendered) => Some(rendered),
                Err(e) => {
                    log::warn!("failed to render emotes: {}", e);
                    None
                }
            },
            None => None,
        };

        self.message_log
            .push_back(&tags, &name, message, rendered)
            .await;
    }
}
28.268817
98
0.5717
ede9104c679381705aaee4befbd0c36df709756c
1,267
use std::fs::File;
use std::io::prelude::*;
use std::path::Path;

pub fn hexdump(buf: &Vec<u8>, from: usize, bytes: usize) {
    // Print offset
    print!("{:04X}: ", from);

    // Print hex bytes
    for n in from..from + bytes {
        match buf.get(n) {
            Some(x) => print!("{:02X} ", x),
            None => print!("   "),
        }
    }

    // Print ascii bytes
    print!("  ");
    for n in from..from + bytes {
        match buf.get(n) {
            Some(x) => {
                if (*x as char).is_alphanumeric() {
                    print!("{}", *x as char);
                } else {
                    print!(".");
                }
            }
            None => print!("."),
        }
    }
    println!("");
}

pub fn load_file(filename: &str) -> Option<Vec<u8>> {
    let path = Path::new(&filename);
    let mut file = match File::open(&path) {
        Err(err) => {
            println!("Couldn't open {}: {:?}", filename, err);
            return None;
        }
        Ok(file) => file,
    };

    let mut buf = Vec::new();
    match file.read_to_end(&mut buf) {
        Err(err) => {
            println!("Couldn't read {}: {:?}", filename, err);
            return None;
        }
        Ok(_) => {}
    };

    Some(buf)
}
22.625
62
0.412786
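A minimal driver for the two helpers above, assuming they live in a module named `util` (the module name is a guess); `load_file` already prints its own error messages, so the caller only needs to handle the `Option`:

fn main() {
    if let Some(buf) = util::load_file("example.bin") {
        let width = 16; // bytes per dump row
        for offset in (0..buf.len()).step_by(width) {
            util::hexdump(&buf, offset, width);
        }
    }
}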
f9ac5663185b7a89ab0db19d7e39cf40c467f93f
17,243
use crate::config::chain_info_request_timeout; use crate::routes::notifications::handlers::build_backend_request; use crate::routes::notifications::models::{ DeviceData, DeviceType, NotificationRegistrationRequest, SafeRegistration, }; use crate::tests::main::setup_rocket; use crate::utils::errors::{ApiError, ErrorDetails}; use crate::utils::http_client::{MockHttpClient, Request, Response}; use mockall::predicate::eq; use rocket::http::{ContentType, Header, Status}; use rocket::local::asynchronous::Client; use std::time::Duration; #[rocket::async_test] async fn delete_notification_success() { let uuid = "some_uuid"; let safe_address = "0x4cb09344de5bCCD45F045c5Defa0E0452869FF0f"; let mut chain_request = Request::new(config_uri!("/v1/chains/{}/", 4)); chain_request.timeout(Duration::from_millis(chain_info_request_timeout())); let mut mock_http_client = MockHttpClient::new(); mock_http_client .expect_get() .times(1) .with(eq(chain_request)) .return_once(move |_| { Ok(Response { status_code: 200, body: String::from(crate::tests::json::CHAIN_INFO_RINKEBY), }) }); let delete_request = Request::new(format!( "https://safe-transaction.rinkeby.staging.gnosisdev.com\ /api/v1/notifications/devices/{}/safes/{}/", uuid, safe_address )); mock_http_client .expect_delete() .times(1) .with(eq(delete_request)) .return_once(move |_| { Ok(Response { status_code: 204, body: String::new(), }) }); let client = Client::tracked( setup_rocket( mock_http_client, routes![super::super::routes::delete_notification_registration], ) .await, ) .await .expect("valid rocket instance"); let request = client .delete(format!( "/v1/chains/{}/notifications/devices/{}/safes/{}", 4, uuid, safe_address )) .header(Header::new("Host", "test.gnosis.io")) .header(ContentType::JSON); let response = request.dispatch().await; let actual_status = response.status(); assert_eq!(Status::Ok, actual_status); } #[rocket::async_test] async fn delete_notification_error() { let uuid = "some_uuid"; let safe_address = "0x4cb09344de5bCCD45F045c5Defa0E0452869FF0f"; let mut chain_request = Request::new(config_uri!("/v1/chains/{}/", 4)); chain_request.timeout(Duration::from_millis(chain_info_request_timeout())); let mut mock_http_client = MockHttpClient::new(); mock_http_client .expect_get() .times(1) .with(eq(chain_request)) .return_once(move |_| { Ok(Response { status_code: 200, body: String::from(crate::tests::json::CHAIN_INFO_RINKEBY), }) }); let delete_request = Request::new(format!( "https://safe-transaction.rinkeby.staging.gnosisdev.com\ /api/v1/notifications/devices/{}/safes/{}/", uuid, safe_address )); mock_http_client .expect_delete() .times(1) .with(eq(delete_request)) .return_once(move |_| { Err(ApiError::from_http_response(&Response { status_code: 422, body: String::new(), })) }); let client = Client::tracked( setup_rocket( mock_http_client, routes![super::super::routes::delete_notification_registration], ) .await, ) .await .expect("valid rocket instance"); let request = client .delete(format!( "/v1/chains/{}/notifications/devices/{}/safes/{}", 4, uuid, safe_address )) .header(Header::new("Host", "test.gnosis.io")) .header(ContentType::JSON); let response = request.dispatch().await; let actual_status = response.status(); assert_eq!(Status::UnprocessableEntity, actual_status); } #[rocket::async_test] async fn post_notification_success() { let safe_address = "0x4cb09344de5bCCD45F045c5Defa0E0452869FF0f"; let request = NotificationRegistrationRequest { device_data: DeviceData { uuid: None, cloud_messaging_token: 
"cloud_messaging_token".to_string(), build_number: "build_number".to_string(), bundle: "bundle".to_string(), device_type: DeviceType::Android, version: "version".to_string(), timestamp: None, }, safe_registrations: vec![ SafeRegistration { chain_id: "4".to_string(), safes: vec![safe_address.to_string()], signatures: vec!["signature".to_string()], }, SafeRegistration { chain_id: "137".to_string(), safes: vec![safe_address.to_string()], signatures: vec!["signature".to_string()], }, ], }; let backend_request = build_backend_request(&request.device_data, &request.safe_registrations[0]); // chain_id is ignored by this method let mut mock_http_client = MockHttpClient::new(); let mut rinkeby_chain_request = Request::new(config_uri!("/v1/chains/{}/", 4)); rinkeby_chain_request.timeout(Duration::from_millis(chain_info_request_timeout())); mock_http_client .expect_get() .with(eq(rinkeby_chain_request)) .times(1) .return_once(move |_| { Ok(Response { status_code: 200, body: String::from(crate::tests::json::CHAIN_INFO_RINKEBY), }) }); let mut polygon_chain_request = Request::new(config_uri!("/v1/chains/{}/", 137)); polygon_chain_request.timeout(Duration::from_millis(chain_info_request_timeout())); mock_http_client .expect_get() .with(eq(polygon_chain_request)) .times(1) .return_once(move |_| { Ok(Response { status_code: 200, body: String::from(crate::tests::json::CHAIN_INFO_POLYGON), }) }); let mut post_request_rinkeby = Request::new(String::from( "https://safe-transaction.rinkeby.staging.gnosisdev.com/api/v1/notifications/devices/", )); post_request_rinkeby.body(Some(serde_json::to_string(&backend_request).unwrap())); mock_http_client .expect_post() .times(1) .with(eq(post_request_rinkeby)) .return_once(move |_| { Ok(Response { status_code: 204, body: String::new(), }) }); let mut post_request_polygon = Request::new(String::from( "https://safe-transaction-polygon.staging.gnosisdev.com/api/v1/notifications/devices/", )); post_request_polygon.body(Some(serde_json::to_string(&backend_request).unwrap())); mock_http_client .expect_post() .times(1) .with(eq(post_request_polygon)) .return_once(move |_| { Ok(Response { status_code: 204, body: String::new(), }) }); let client = Client::tracked( setup_rocket( mock_http_client, routes![super::super::routes::post_notification_registration], ) .await, ) .await .expect("valid rocket instance"); let request = client .post("/v1/register/notifications") .body(&serde_json::to_string(&request).unwrap()) .header(Header::new("Host", "test.gnosis.io")) .header(ContentType::JSON); let response = request.dispatch().await; let actual_status = response.status(); assert_eq!(Status::Ok, actual_status); } #[rocket::async_test] async fn post_notification_client_error() { let safe_address = "0x4cb09344de5bCCD45F045c5Defa0E0452869FF0f"; let expected_error = ErrorDetails { code: 1337, message: Some("Push notification registration failed for chain IDs: 4, 137".to_string()), arguments: None, debug: serde_json::from_str( "[{\"4\":{\"code\":500,\"message\":null}}\ ,{\"137\":{\"safes\":{\"0\":[\"Address 0x0 is not valid\"]},\ \"timestamp\":[\"Provided timestamp is not in a range within 5 minutes\"]}}]", ) .ok(), }; let request = NotificationRegistrationRequest { device_data: DeviceData { uuid: None, cloud_messaging_token: "cloud_messaging_token".to_string(), build_number: "build_number".to_string(), bundle: "bundle".to_string(), device_type: DeviceType::Android, version: "version".to_string(), timestamp: None, }, safe_registrations: vec![ SafeRegistration { chain_id: "4".to_string(), 
safes: vec!["0x0".to_string()], signatures: vec!["signature".to_string()], }, SafeRegistration { chain_id: "137".to_string(), safes: vec![safe_address.to_string()], signatures: vec!["signature".to_string()], }, ], }; let rinkeby_backend_request = build_backend_request(&request.device_data, &request.safe_registrations[0]); let polygon_backend_request = build_backend_request(&request.device_data, &request.safe_registrations[1]); let mut mock_http_client = MockHttpClient::new(); let mut rinkeby_chain_request = Request::new(config_uri!("/v1/chains/{}/", 4)); rinkeby_chain_request.timeout(Duration::from_millis(chain_info_request_timeout())); mock_http_client .expect_get() .with(eq(rinkeby_chain_request)) .times(1) .return_once(move |_| { Ok(Response { status_code: 200, body: String::from(crate::tests::json::CHAIN_INFO_RINKEBY), }) }); let mut polygon_chain_request = Request::new(config_uri!("/v1/chains/{}/", 137)); polygon_chain_request.timeout(Duration::from_millis(chain_info_request_timeout())); mock_http_client .expect_get() .with(eq(polygon_chain_request)) .times(1) .return_once(move |_| { Ok(Response { status_code: 200, body: String::from(crate::tests::json::CHAIN_INFO_POLYGON), }) }); let mut post_request_rinkeby = Request::new(String::from( "https://safe-transaction.rinkeby.staging.gnosisdev.com/api/v1/notifications/devices/", )); post_request_rinkeby.body(Some( serde_json::to_string(&rinkeby_backend_request).unwrap(), )); mock_http_client .expect_post() .times(1) .with(eq(post_request_rinkeby)) .return_once(move |_| { Err(ApiError::from_http_response(&Response { status_code: 422, body: serde_json::to_string(&ErrorDetails { code: 500, message: None, arguments: None, debug: None, }) .unwrap(), })) }); let mut post_request_polygon = Request::new(String::from( "https://safe-transaction-polygon.staging.gnosisdev.com/api/v1/notifications/devices/", )); post_request_polygon.body(Some( serde_json::to_string(&polygon_backend_request).unwrap(), )); mock_http_client .expect_post() .times(1) .with(eq(post_request_polygon)) .return_once(move |_| { Err(ApiError::new_from_message_with_code( 400, "{\"safes\": {\ \"0\": [\ \"Address 0x0 is not valid\"\ ]\ },\ \"timestamp\": [\ \"Provided timestamp is not in a range within 5 minutes\"\ ]}" .to_string(), )) }); let client = Client::tracked( setup_rocket( mock_http_client, routes![super::super::routes::post_notification_registration], ) .await, ) .await .expect("valid rocket instance"); let request = client .post("/v1/register/notifications") .body(&serde_json::to_string(&request).unwrap()) .header(Header::new("Host", "test.gnosis.io")) .header(ContentType::JSON); let response = request.dispatch().await; let actual_status = response.status(); let error_body = response.into_string().await.unwrap(); let actual = serde_json::from_str::<ErrorDetails>(&error_body).unwrap(); assert_eq!(Status::BadRequest, actual_status); assert_eq!(expected_error, actual); } #[rocket::async_test] async fn post_notification_server_and_client_errors() { let mut mock_http_client = MockHttpClient::new(); let safe_address = "0x4cb09344de5bCCD45F045c5Defa0E0452869FF0f"; // Mock /v1/chains/ let mut polygon_chain_request = Request::new(config_uri!("/v1/chains/{}/", 137)); polygon_chain_request.timeout(Duration::from_millis(chain_info_request_timeout())); mock_http_client .expect_get() .with(eq(polygon_chain_request)) .times(1) .return_once(move |_| { Ok(Response { status_code: 200, body: String::from(crate::tests::json::CHAIN_INFO_POLYGON), }) }); // Request Payload let request = 
NotificationRegistrationRequest { device_data: DeviceData { uuid: None, cloud_messaging_token: "cloud_messaging_token".to_string(), build_number: "build_number".to_string(), bundle: "bundle".to_string(), device_type: DeviceType::Android, version: "version".to_string(), timestamp: None, }, safe_registrations: vec![ SafeRegistration { chain_id: "137".to_string(), safes: vec!["0x0".to_string()], signatures: vec!["signature".to_string()], }, SafeRegistration { chain_id: "137".to_string(), safes: vec![safe_address.to_string()], signatures: vec!["signature".to_string()], }, ], }; // POST request with first payload – returns a 400 let polygon_backend_request_0 = build_backend_request(&request.device_data, &request.safe_registrations[0]); let mut post_request_polygon_0 = Request::new(String::from( "https://safe-transaction-polygon.staging.gnosisdev.com/api/v1/notifications/devices/", )); post_request_polygon_0.body(Some( serde_json::to_string(&polygon_backend_request_0).unwrap(), )); mock_http_client .expect_post() .times(1) .with(eq(post_request_polygon_0)) .return_once(move |_| { Err(ApiError::new_from_message_with_code( 400, "{ \"test\" : \"Some client error\"}".to_string(), )) }); // POST request with first payload – returns a 500 let polygon_backend_request_1 = build_backend_request(&request.device_data, &request.safe_registrations[1]); let mut post_request_polygon_1 = Request::new(String::from( "https://safe-transaction-polygon.staging.gnosisdev.com/api/v1/notifications/devices/", )); post_request_polygon_1.body(Some( serde_json::to_string(&polygon_backend_request_1).unwrap(), )); mock_http_client .expect_post() .times(1) .with(eq(post_request_polygon_1)) .return_once(move |_| { Err(ApiError::new_from_message_with_code( 500, "{ \"test\" : \"Some server error\"}".to_string(), )) }); // Test execution let client = Client::tracked( setup_rocket( mock_http_client, routes![super::super::routes::post_notification_registration], ) .await, ) .await .expect("valid rocket instance"); let request = client .post("/v1/register/notifications") .body(&serde_json::to_string(&request).unwrap()) .header(Header::new("Host", "test.gnosis.io")) .header(ContentType::JSON); let response = request.dispatch().await; let actual_status = response.status(); let error_body = response.into_string().await.unwrap(); let actual = serde_json::from_str::<ErrorDetails>(&error_body).unwrap(); let expected_error = ErrorDetails { code: 1337, message: Some("Push notification registration failed for chain IDs: 137, 137".to_string()), arguments: None, debug: serde_json::from_str("[ {\"137\" : { \"test\" : \"Some client error\"}}, {\"137\" : { \"test\" : \"Some server error\"}}]").ok(), }; assert_eq!(Status::InternalServerError, actual_status); assert_eq!(expected_error, actual); }
34.280318
144
0.585977
560b31b608ce90f692123f2d3b6d035c5b048e83
892
use crate::spec::{LinkerFlavor, Target, TargetOptions, TargetResult, Endianness};

pub fn target() -> TargetResult {
    let mut base = super::linux_musl_base::opts();
    base.max_atomic_width = Some(128);

    Ok(Target {
        llvm_target: "aarch64-unknown-linux-musl".to_string(),
        target_endian: Endianness::Little,
        target_pointer_width: 64,
        target_c_int_width: "32".to_string(),
        target_env: "musl".to_string(),
        data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(),
        arch: "aarch64".to_string(),
        target_os: "linux".to_string(),
        target_vendor: "unknown".to_string(),
        linker_flavor: LinkerFlavor::Gcc,
        options: TargetOptions {
            abi_blacklist: super::arm_base::abi_blacklist(),
            target_mcount: "\u{1}_mcount".to_string(),
            ..base
        },
    })
}
35.68
87
0.619955
672e93ca05e401fd2eaaa3c860be5c0f641db373
160
#[cfg(any(feature = "google-identity-accesscontextmanager-type"))] pub mod r#type; #[cfg(any(feature = "google-identity-accesscontextmanager-v1"))] pub mod v1;
32
66
0.74375
6939ce095a80df8278964be8733d5a8bf5eaa169
2,997
use lazy_static::lazy_static;
use regex::Regex;
use rhai::Scope;

lazy_static! {
    static ref ESC_A: Regex = Regex::new(r"\b(r\d*|p\d*)\.").unwrap();
    static ref ESC_G: Regex =
        Regex::new(r"\b(g\d*)\(((?:\s*[r|p]\d*\.\w+\s*,\s*){1,2}\s*[r|p]\d*\.\w+\s*)\)").unwrap();
    pub(crate) static ref ESC_E: Regex = Regex::new(r"\beval\((?P<rule>[^)]*)\)").unwrap();
}

pub fn escape_assertion(s: String) -> String {
    ESC_A.replace_all(&s, "${1}_").to_string()
}

pub fn escape_g_function(s: String) -> String {
    ESC_G.replace_all(&s, "${1}([${2}])").to_string()
}

pub fn remove_comments(mut s: String) -> String {
    if let Some(idx) = s.find('#') {
        s.truncate(idx);
    }
    s.trim_end().to_owned()
}

pub fn escape_eval(mut m: String, scope: &Scope) -> String {
    let cm = m.to_owned();
    for caps in ESC_E.captures_iter(&cm) {
        if let Some(val) = scope.get_value::<String>(&caps["rule"]) {
            m = ESC_E
                .replace(m.as_str(), escape_assertion(format!("({})", &val)).as_str())
                .to_string();
        } else {
            panic!("{} not found in scope", &caps["rule"]);
        }
    }
    m
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_escape_g_function() {
        let s = "g(r.sub, p.sub) && r.obj == p.obj && r.act == p.act";
        let exp = "g([r.sub, p.sub]) && r.obj == p.obj && r.act == p.act";
        assert_eq!(exp, escape_g_function(s.to_owned()));

        let s1 = "g2(r.sub, p.sub) && r.obj == p.obj && r.act == p.act";
        let exp1 = "g2([r.sub, p.sub]) && r.obj == p.obj && r.act == p.act";
        assert_eq!(exp1, escape_g_function(s1.to_owned()));

        let s2 = "g3(r.sub, p.sub) && r.obj == p.obj && r.act == p.act";
        let exp2 = "g3([r.sub, p.sub]) && r.obj == p.obj && r.act == p.act";
        assert_eq!(exp2, escape_g_function(s2.to_owned()));

        let s3 = "g3(r2.sub, p2.sub) && r2.obj == p2.obj && r2.act == p2.act";
        let exp3 = "g3([r2.sub, p2.sub]) && r2.obj == p2.obj && r2.act == p2.act";
        assert_eq!(exp3, escape_g_function(s3.to_owned()));
    }

    #[test]
    fn test_remove_comments() {
        assert!(remove_comments("#".to_owned()).is_empty());
        assert_eq!(
            r#"g(r.sub, p.sub) && r.obj == p.obj && r.act == p.act || r.sub == "root""#,
            remove_comments(
                r#"g(r.sub, p.sub) && r.obj == p.obj && r.act == p.act || r.sub == "root" # root is the super user"#.to_owned()
            )
        );
    }

    #[test]
    fn test_escape_assertion() {
        let s = "g(r.sub, p.sub) && r.obj == p.obj && r.act == p.act";
        let exp = "g(r_sub, p_sub) && r_obj == p_obj && r_act == p_act";
        assert_eq!(exp, escape_assertion(s.to_owned()));

        let s1 = "g(r2.sub, p2.sub) && r2.obj == p2.obj && r2.act == p2.act";
        let exp1 = "g(r2_sub, p2_sub) && r2_obj == p2_obj && r2_act == p2_act";
        assert_eq!(exp1, escape_assertion(s1.to_owned()));
    }
}
32.225806
127
0.511178
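`escape_eval` is the only helper above without a unit test; it looks up the named rule in the `rhai` scope, wraps it in parentheses, escapes it with `escape_assertion`, and splices it into the `eval(...)` hole. A sketch of the expected behavior, assuming the rule is stored in the scope as a `String`:

use rhai::Scope;

let mut scope = Scope::new();
scope.push("p_sub_rule", String::from("r.sub == 'alice'"));

let out = escape_eval("eval(p_sub_rule) && r.obj == p.obj".to_string(), &scope);
// Only the spliced rule is assertion-escaped; the rest of the input is untouched.
assert_eq!(out, "(r_sub == 'alice') && r.obj == p.obj");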
33fd9081ea2bb915174ed1afb3da0ec4c1a7b4a1
8,532
//! `cargo run --bin day_18` use std::fs::File; use std::io::{BufRead, BufReader}; #[derive(Clone, Debug)] struct Acre { current: char, next: char } struct Point { x: i32, y: i32 } fn build_initial_grid() -> Vec<Vec<Acre>> { let default = Acre { current: '?', next: '?' }; let grid_size = 50; let mut grid = vec![vec![default; grid_size]; grid_size]; let mut x = 0; for line in BufReader::new(File::open("src/data/day_18_input.txt").unwrap()).lines() { let line = line.unwrap(); let mut y = 0; for c in line.chars() { grid[x][y] = Acre { current: c, next: '?' }; y += 1; } x += 1; } grid } fn part1() -> () { let grid_size = 50; let mut grid = build_initial_grid(); let mut minutes = 10; while minutes != 0 { for x in 0..grid_size { let x_coord = x as i32; for y in 0..grid_size { let current = grid[x][y].current; let mut adjacent_points: Vec<Point> = Vec::new(); let y_coord = y as i32; let top_left = Point { x: x_coord - 1, y: y_coord - 1 }; let top = Point { x: x_coord - 1, y: y_coord }; let top_right = Point { x: x_coord - 1, y: y_coord + 1 }; let left = Point { x: x_coord, y: y_coord - 1 }; let right = Point { x: x_coord, y: y_coord + 1 }; let bottom_left = Point { x: x_coord + 1, y: y_coord - 1 }; let bottom = Point { x: x_coord + 1, y: y_coord }; let bottom_right = Point { x: x_coord + 1, y: y_coord + 1 }; adjacent_points.push(top_left); adjacent_points.push(top); adjacent_points.push(top_right); adjacent_points.push(left); adjacent_points.push(right); adjacent_points.push(bottom_left); adjacent_points.push(bottom); adjacent_points.push(bottom_right); let mut tree_count = 0; let mut lumberyard_count = 0; for p in adjacent_points { if p.x >= 0 && p.x < grid_size as i32 && p.y >= 0 && p.y < grid_size as i32 { let acre: &Acre = &grid[p.x as usize][p.y as usize]; if acre.current == '|' { tree_count += 1; } else if acre.current == '#' { lumberyard_count += 1; } } } match current { '.' => { if tree_count >= 3 { grid[x][y].next = '|'; } else { grid[x][y].next = current; } }, '|' => { if lumberyard_count >= 3 { grid[x][y].next = '#'; } else { grid[x][y].next = current; } }, '#' => { if lumberyard_count >= 1 && tree_count >= 1 { grid[x][y].next = current; } else { grid[x][y].next = '.' 
} }, _ => {} } } } for x in 0..grid_size { for y in 0..grid_size { grid[x][y].current = grid[x][y].next; } } minutes -= 1; } let mut lumberyard_count = 0; let mut wooded_acre_count = 0; for x in 0..grid_size { for y in 0..grid_size { if grid[x][y].current == '#' { lumberyard_count += 1; } else if grid[x][y].current == '|' { wooded_acre_count += 1 } } } println!("Part 1: {}", lumberyard_count * wooded_acre_count); } fn part2() -> () { let grid_size = 50; let mut grid = build_initial_grid(); let mut minutes = 1000000000; let mut len = 0; let mut sequence_started = false; let mut sequence_vec: Vec<i32> = Vec::new(); while minutes != 0 { for x in 0..grid_size { let x_coord = x as i32; for y in 0..grid_size { let current = grid[x][y].current; let mut adjacent_points: Vec<Point> = Vec::new(); let y_coord = y as i32; let top_left = Point { x: x_coord - 1, y: y_coord - 1 }; let top = Point { x: x_coord - 1, y: y_coord }; let top_right = Point { x: x_coord - 1, y: y_coord + 1 }; let left = Point { x: x_coord, y: y_coord - 1 }; let right = Point { x: x_coord, y: y_coord + 1 }; let bottom_left = Point { x: x_coord + 1, y: y_coord - 1 }; let bottom = Point { x: x_coord + 1, y: y_coord }; let bottom_right = Point { x: x_coord + 1, y: y_coord + 1 }; adjacent_points.push(top_left); adjacent_points.push(top); adjacent_points.push(top_right); adjacent_points.push(left); adjacent_points.push(right); adjacent_points.push(bottom_left); adjacent_points.push(bottom); adjacent_points.push(bottom_right); let mut tree_count = 0; let mut lumberyard_count = 0; for p in adjacent_points { if p.x >= 0 && p.x < grid_size as i32 && p.y >= 0 && p.y < grid_size as i32 { let acre: &Acre = &grid[p.x as usize][p.y as usize]; if acre.current == '|' { tree_count += 1; } else if acre.current == '#' { lumberyard_count += 1; } } } match current { '.' => { if tree_count >= 3 { grid[x][y].next = '|'; } else { grid[x][y].next = current; } }, '|' => { if lumberyard_count >= 3 { grid[x][y].next = '#'; } else { grid[x][y].next = current; } }, '#' => { if lumberyard_count >= 1 && tree_count >= 1 { grid[x][y].next = current; } else { grid[x][y].next = '.' } }, _ => {} } } } let mut lumberyard_count = 0; let mut wooded_acre_count = 0; for x in 0..grid_size { for y in 0..grid_size { grid[x][y].current = grid[x][y].next; if grid[x][y].current == '#' { lumberyard_count += 1; } else if grid[x][y].current == '|' { wooded_acre_count += 1 } } } let total = lumberyard_count * wooded_acre_count; // Some "magic number" programming.. I noticed that there is // a 28 number sequence after the 400th minute (iteration) by // printing out totals to stdout. There is probably a general // way of determining this if total == 172765 { if !sequence_started { len = 1000000000 - minutes - 1; sequence_started = true; } else { break; } } if sequence_started { sequence_vec.push(total); } minutes -= 1; } let index = ((1000000000 - len) % sequence_vec.len()) - 2; println!("Part 2: {}", sequence_vec.get(index).unwrap()); } fn main() -> (){ part1(); part2(); }
31.955056
97
0.392757
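The part 2 comment in the solution above admits the 28-entry cycle was found by inspection. The general fix it gestures at is to fingerprint each state and jump ahead once a repeat appears; a self-contained sketch, not wired into the grid code:

use std::collections::HashMap;

// Returns the state after `target` steps of an iterated process, skipping
// ahead as soon as a previously seen fingerprint reveals the cycle period.
fn value_at<S, K: std::hash::Hash + Eq>(
    mut state: S,
    target: usize,
    mut step: impl FnMut(&S) -> S,
    key: impl Fn(&S) -> K,
) -> S {
    let mut seen: HashMap<K, usize> = HashMap::new();
    let mut t = 0;
    while t < target {
        if let Some(&start) = seen.get(&key(&state)) {
            let period = t - start;
            let remaining = (target - t) % period;
            for _ in 0..remaining {
                state = step(&state);
            }
            return state;
        }
        seen.insert(key(&state), t);
        state = step(&state);
        t += 1;
    }
    state
}

For the puzzle, `S` would be the grid, `key` a `String` snapshot of it, and `step` one minute of the automaton.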
ab31b8301720d4d82930376d37ebdf93c177590f
27,435
use crate::rsx::ast::Attribute; use crate::rsx::ast::Node; use crate::rsx::ast::Value; use crate::rsx::error::Error; use crate::rsx::error::Result; use crate::util::flatten_non_braces; use crate::util::token_stream_eq; use crate::util::MicroVec; use crate::util::TokenIterator; use ::proc_macro2::TokenStream; use ::proc_macro2::TokenTree; use ::std::fmt::Write; use ::std::vec::IntoIter; const COLON: char = ':'; const EXCLAMATION_MARK: char = '!'; const HYPHEN: char = '-'; const LEFT_ANGLE: char = '<'; const RIGHT_ANGLE: char = '>'; const FORWARD_SLASH: char = '/'; const EQUALS: char = '='; static COMMENT_CLOSING_LOOKAHEAD: &'static [char] = &[HYPHEN, HYPHEN, RIGHT_ANGLE]; static TAG_OPENING_LOOKAHEAD: &'static [char] = &[LEFT_ANGLE]; static TAG_CLOSING_LOOKAHEAD: &'static [char] = &[LEFT_ANGLE, FORWARD_SLASH]; type TokenIteratorVec = TokenIterator<IntoIter<TokenTree>>; pub fn parse(stream: TokenStream) -> Result<Node> { parse_root(stream) } fn parse_root(stream: TokenStream) -> Result<Node> { if stream.is_empty() { return Err(Error::EmptyMacroStreamGiven); } let flat_stream = flatten_non_braces(stream); let mut input = TokenIterator::new(flat_stream); let node = parse_root_node(&mut input)?; if !input.is_empty() { return Err(Error::ExcessTokensFound); } Ok(node) } fn parse_root_node(input: &mut TokenIteratorVec) -> Result<Node> { let mut nodes = MicroVec::new(); while !input.is_empty() { let node = parse_node(input)?; nodes.push(node); } match nodes { MicroVec::None => Err(Error::EmptyMacroStreamGiven), MicroVec::Item(node) => Ok(node), MicroVec::Vec(children) => Ok(Node::Fragment { children }), } } fn parse_node(input: &mut TokenIteratorVec) -> Result<Node> { if input.is_next_punct(LEFT_ANGLE) { if input.is_lookahead_punct(EXCLAMATION_MARK, 1) { if input.is_lookahead_punct(HYPHEN, 2) { parse_node_comment(input) } else { parse_node_doctype(input) } } else { parse_node_tag(input) } } else if input.is_brace_group() { Ok(Node::Code(input.chomp_brace_group()?)) } else { parse_node_text(input) } } fn parse_node_comment(input: &mut TokenIteratorVec) -> Result<Node> { input.chomp_puncts(&[LEFT_ANGLE, EXCLAMATION_MARK, HYPHEN, HYPHEN])?; let children = parse_comment_children(input)?; input.chomp_puncts(&[HYPHEN, HYPHEN, RIGHT_ANGLE])?; Ok(Node::Comment { children }) } fn parse_comment_children(input: &mut TokenIteratorVec) -> Result<Option<Vec<Node>>> { let mut maybe_children = None; loop { if input.is_empty() { return Err(Error::MoreTokensExpected); } if input.is_lookahead_puncts(&COMMENT_CLOSING_LOOKAHEAD) { return Ok(maybe_children); } let child = if input.is_brace_group() { Node::Code(input.chomp_brace_group()?) } else { Node::Text(parse_text(input, &COMMENT_CLOSING_LOOKAHEAD)?) }; match maybe_children.as_mut() { Some(children) => children.push(child), None => maybe_children = Some(vec![child]), } } } fn parse_node_doctype(input: &mut TokenIteratorVec) -> Result<Node> { input.chomp_puncts(&[LEFT_ANGLE, EXCLAMATION_MARK])?; let name = parse_name(input)?; let attributes = parse_attributes(input)?; input.chomp_punct(RIGHT_ANGLE)?; Ok(Node::Doctype { name, attributes }) } fn parse_node_tag(input: &mut TokenIteratorVec) -> Result<Node> { input.chomp_punct(LEFT_ANGLE)?; // parses </> if input.is_lookahead_puncts(&[FORWARD_SLASH, RIGHT_ANGLE]) { input.chomp_puncts(&[FORWARD_SLASH, RIGHT_ANGLE])?; return Ok(Node::Empty); } // parses <>(... 
contents)</> if input.is_next_punct(RIGHT_ANGLE) { input.chomp_punct(RIGHT_ANGLE)?; let maybe_children = parse_children(input)?; input.chomp_puncts(&[LEFT_ANGLE, FORWARD_SLASH, RIGHT_ANGLE])?; return match maybe_children { Some(children) => Ok(Node::Fragment { children }), None => Ok(Node::Empty), }; } // Real tags from here on. i.e. <div></div> and <hr /> let opening_tag_name = parse_name(input)?; let attributes = parse_attributes(input)?; let is_component = is_component_name(&opening_tag_name); if input.is_next_punct(FORWARD_SLASH) { input.chomp_puncts(&[FORWARD_SLASH, RIGHT_ANGLE])?; if is_component { if let Value::Text(opening_tag_name_string) = opening_tag_name { return Ok(Node::SelfClosingComponent { name: opening_tag_name_string, attributes, }); } else { unreachable!("Component name was not parsed as `Value::Text` (this is a bug)"); } } else { return Ok(Node::SelfClosing { name: opening_tag_name, attributes, }); } } input.chomp_punct(RIGHT_ANGLE)?; let children = parse_children(input)?; // Closing Tag. input.chomp_puncts(&[LEFT_ANGLE, FORWARD_SLASH])?; let closing_tag_name = parse_name(input)?; input.chomp_punct(RIGHT_ANGLE)?; match (&opening_tag_name, &closing_tag_name) { (Value::Text(left_text), Value::Text(right_text)) => { if left_text != right_text { return Err(Error::MismatchedClosingTagName); } } (Value::Code(left_code), Value::Code(right_code)) => { if !right_code.is_empty() { if !token_stream_eq(&left_code, &right_code) { return Err(Error::MismatchedClosingTagCode); } } } _ => { return Err(Error::MismatchedClosingTagName); } } if is_component { if let Value::Text(opening_tag_name_string) = opening_tag_name { Ok(Node::OpenComponent { name: opening_tag_name_string, attributes, children, }) } else { unreachable!("Component name was not parsed as `Value::Text` (this is a bug)"); } } else { Ok(Node::Open { name: opening_tag_name, attributes, children, }) } } fn parse_node_text(input: &mut TokenIteratorVec) -> Result<Node> { Ok(Node::Text(parse_text(input, &TAG_OPENING_LOOKAHEAD)?)) } fn parse_attributes(input: &mut TokenIteratorVec) -> Result<Option<Vec<Attribute>>> { let mut maybe_attrs = None; while let Some(attribute) = parse_attribute(input)? { match maybe_attrs.as_mut() { None => maybe_attrs = Some(vec![attribute]), Some(attrs) => attrs.push(attribute), } } Ok(maybe_attrs) } fn parse_attribute(input: &mut TokenIteratorVec) -> Result<Option<Attribute>> { let maybe_key = parse_maybe_name(input)?; if maybe_key.is_none() { return Ok(None); } let key = maybe_key.unwrap(); if input.is_next_punct(EQUALS) { input.chomp_punct(EQUALS)?; let value = Some(parse_attribute_value(input)?); return Ok(Some(Attribute { key, value })); } Ok(Some(Attribute { key, value: None })) } fn parse_attribute_value(input: &mut TokenIteratorVec) -> Result<Value> { if input.is_brace_group() { Ok(Value::Code(input.chomp_brace_group()?)) } else { Ok(Value::Text(input.chomp_literal()?)) } } fn parse_name(input: &mut TokenIteratorVec) -> Result<Value> { match parse_maybe_name(input)? 
{ Some(name) => Ok(name), None => Err(Error::ExpectedName), } } fn parse_maybe_name(input: &mut TokenIteratorVec) -> Result<Option<Value>> { let mut maybe_key: Option<String> = None; if input.is_next_literal() { return Ok(Some(Value::Text(input.chomp_literal()?))); } if input.is_brace_group() { return Ok(Some(Value::Code(input.chomp_brace_group()?))); } loop { if input.is_next_ident() { let next = input.chomp()?; match maybe_key.as_mut() { Some(key) => write!(key, "{}", next)?, None => maybe_key = Some(next.to_string()), } } // Check if there is `-name` or `:name` ahead of us. // If there is we will capture that too. let mut i = 0; while input.is_lookahead_punct(HYPHEN, i) || input.is_lookahead_punct(COLON, i) { i += 1; } if i > 0 && input.is_lookahead_ident(i) { while input.is_next_punct(HYPHEN) || input.is_next_punct(COLON) { match maybe_key.as_mut() { Some(key) => write!(key, "{}", input.chomp()?)?, None => maybe_key = Some(input.chomp()?.to_string()), } } continue; } break; } Ok(maybe_key.map(|text| Value::Text(text))) } /// Finds and grabs all child nodes, and then returns them in a Vec. /// `stopping_lookaheads` is for telling it what puncts to look for, /// to know to stop parsing. For a HTML tag this is `</`, and for a comment this is `-->`. fn parse_children(input: &mut TokenIteratorVec) -> Result<Option<Vec<Node>>> { let mut maybe_children = None; loop { if input.is_empty() { return Err(Error::MoreTokensExpected); } if input.is_lookahead_puncts(&TAG_CLOSING_LOOKAHEAD) { return Ok(maybe_children); } let child = parse_node(input)?; match maybe_children.as_mut() { Some(children) => children.push(child), None => maybe_children = Some(vec![child]), } } } fn parse_text(input: &mut TokenIteratorVec, stopping_lookaheads: &[char]) -> Result<String> { let mut text = String::new(); let mut last_spacing_rules = (false, false); while !input.is_brace_group() && !input.is_empty() && !input.is_lookahead_puncts(stopping_lookaheads) { let next = input.chomp()?; let next_spacing_rules = spacing_rules(&next); match (last_spacing_rules, next_spacing_rules) { ((_, true), (true, _)) => { write!(text, " ")?; } _ => {} } last_spacing_rules = next_spacing_rules; match next { TokenTree::Ident(ident) => { write!(text, "{}", ident)?; } TokenTree::Punct(punct) => { write!(text, "{}", punct)?; } TokenTree::Literal(literal) => { let literal_string = literal.to_string(); if literal_string.starts_with('"') { let literal_substring = &literal_string.as_str()[1..literal_string.len() - 1]; write!(text, "{}", literal_substring)?; } else { write!(text, "{}", literal_string)?; } } TokenTree::Group(_) => unreachable!(), } } Ok(text) } fn spacing_rules(tree: &TokenTree) -> (bool, bool) { match tree { TokenTree::Ident(_) => (true, true), TokenTree::Literal(_) => (true, true), TokenTree::Group(_) => (true, true), TokenTree::Punct(punct) => char_spacing_rules(punct.as_char()), } } fn char_spacing_rules(c: char) -> (bool, bool) { match c { '.' => (false, true), ',' => (false, true), ';' => (false, true), ':' => (false, true), '?' => (false, true), '!' => (false, true), '%' => (false, true), ')' => (false, true), ']' => (false, true), '>' => (false, true), '}' => (false, true), '(' => (true, false), '[' => (true, false), '{' => (true, false), '<' => (true, false), '-' => (false, false), _ => (true, true), } } fn is_component_name(opening_tag_name: &Value) -> bool { match opening_tag_name { Value::Code(_) => { return false; } Value::Text(name) => { let mut chars = name.chars(); // Starts with an uppercase character. 
match chars.next() { None => { return false; } Some(c) => { if !c.is_ascii_uppercase() { return false; } } } // The rest matches Rust identifier rules. for c in chars { if !c.is_alphanumeric() && c != '_' { return false; } } true } } } #[cfg(test)] mod parse { use super::*; use ::pretty_assertions::assert_eq; use ::quote::quote; #[cfg(test)] mod doctype { use super::*; #[test] fn it_should_render_doctype_html() -> Result<()> { let code = quote! { <!doctype html> }; let expected = Node::Doctype { name: Value::Text("doctype".to_string()), attributes: Some(vec![Attribute { key: Value::Text("html".to_string()), value: None, }]), }; assert_eq_nodes(code, expected) } #[test] fn it_should_preserve_capitalisation() -> Result<()> { let code = quote! { <!DoCtYpE html> }; let expected = Node::Doctype { name: Value::Text("DoCtYpE".to_string()), attributes: Some(vec![Attribute { key: Value::Text("html".to_string()), value: None, }]), }; assert_eq_nodes(code, expected) } } #[cfg(test)] mod comments { use super::*; #[test] fn it_should_support_empty_comments() -> Result<()> { let code = quote! { <!-- --> }; let expected = Node::Comment { children: None }; assert_eq_nodes(code, expected) } #[test] fn it_should_support_simple_comments() -> Result<()> { let code = quote! { <!-- this is a comment --> }; let expected = Node::Comment { children: Some(vec![Node::Text("this is a comment".to_string())]), }; assert_eq_nodes(code, expected) } #[test] fn it_should_parse_tags_like_they_are_text() -> Result<()> { let code = quote! { <!-- this is a <div> </hr> comment --> }; let expected = Node::Comment { children: Some(vec![Node::Text( "this is a <div> </ hr> comment".to_string(), )]), }; assert_eq_nodes(code, expected) } #[test] fn it_should_support_code() -> Result<()> { let code = quote! { <!-- this is a <div> </hr> comment {"this is some code"} "this is another string" --> }; let expected = Node::Comment { children: Some(vec![ Node::Text("this is a <div> </ hr> comment".to_string()), Node::Code(quote! {"this is some code"}), Node::Text("this is another string".to_string()), ]), }; assert_eq_nodes(code, expected) } } #[test] fn it_should_support_root_literals() -> Result<()> { let code = quote! { blah blah blah }; let expected = Node::Text("blah blah blah".to_string()); assert_eq_nodes(code, expected) } #[cfg(test)] mod fragments { use super::*; #[test] fn it_should_capture_self_closing_blank_nodes() -> Result<()> { let code = quote! { </> }; let expected = Node::Empty; assert_eq_nodes(code, expected) } #[test] fn it_should_capture_blank_nodes() -> Result<()> { let code = quote! { <></> }; let expected = Node::Empty; assert_eq_nodes(code, expected) } #[test] pub fn it_should_render_the_contents_of_fragments() -> Result<()> { let code = quote! { <> <h1>This is a heading</h1> This is some text <hr /> </> }; let expected = Node::Fragment { children: vec![ Node::Open { name: Value::Text("h1".to_string()), attributes: None, children: Some(vec![Node::Text("This is a heading".to_string())]), }, Node::Text("This is some text".to_string()), Node::SelfClosing { name: Value::Text("hr".to_string()), attributes: None, }, ], }; assert_eq_nodes(code, expected) } } #[test] fn it_should_return_node_for_self_closing_tag() -> Result<()> { let code = quote! { <div/> }; let expected = Node::SelfClosing { name: Value::Text("div".to_string()), attributes: None, }; assert_eq_nodes(code, expected) } #[test] fn it_should_return_node_for_an_empty_tag() -> Result<()> { let code = quote! 
{ <h1></h1> }; let expected = Node::Open { name: Value::Text("h1".to_string()), attributes: None, children: None, }; assert_eq_nodes(code, expected) } #[test] fn it_should_return_an_error_on_mismatched_closing_node() { let code = quote! { <div></p> }; let error = parse(code.into()).err().unwrap(); assert_eq!(error, Error::MismatchedClosingTagName); } #[test] fn it_should_parse_lone_attributes() -> Result<()> { let code = quote! { <button is_disabled></button> }; let expected = Node::Open { name: Value::Text("button".to_string()), attributes: Some(vec![Attribute { key: Value::Text("is_disabled".to_string()), value: None, }]), children: None, }; assert_eq_nodes(code, expected) } #[test] fn it_should_not_support_hyphens_before_attribute_keys() { let code = quote! { <button --data-name="MrButton">Click me</button> }; let received = parse(code); assert_eq!(received.err().unwrap(), Error::UnexpectedToken); } #[test] fn it_should_parse_lone_attributes_on_self_closing_tags() -> Result<()> { let code = quote! { <button is_disabled /> }; let expected = Node::SelfClosing { name: Value::Text("button".to_string()), attributes: Some(vec![Attribute { key: Value::Text("is_disabled".to_string()), value: None, }]), }; assert_eq_nodes(code, expected) } #[test] fn it_should_parse_key_value_attributes() -> Result<()> { let code = quote! { <button type="input"></button> }; let expected = Node::Open { name: Value::Text("button".to_string()), attributes: Some(vec![Attribute { key: Value::Text("type".to_string()), value: Some(Value::Text("input".to_string())), }]), children: None, }; assert_eq_nodes(code, expected) } #[test] fn it_should_parse_key_value_attributes_on_self_closing_tags() -> Result<()> { let code = quote! { <button type="input" /> }; let expected = Node::SelfClosing { name: Value::Text("button".to_string()), attributes: Some(vec![Attribute { key: Value::Text("type".to_string()), value: Some(Value::Text("input".to_string())), }]), }; assert_eq_nodes(code, expected) } #[test] fn it_should_parse_key_value_code_attributes() -> Result<()> { let code = quote! { <button type={base_class.child("el")} /> }; let expected = Node::SelfClosing { name: Value::Text("button".to_string()), attributes: Some(vec![Attribute { key: Value::Text("type".to_string()), value: Some(Value::Code(quote! { base_class.child("el") })), }]), }; assert_eq_nodes(code, expected) } #[test] fn it_should_parse_child_nodes() -> Result<()> { let code = quote! { <div> <h1/> </div> }; let expected = Node::Open { name: Value::Text("div".to_string()), attributes: None, children: Some(vec![Node::SelfClosing { name: Value::Text("h1".to_string()), attributes: None, }]), }; assert_eq_nodes(code, expected) } #[test] fn it_should_parse_multiple_child_nodes() -> Result<()> { let code = quote! { <div> <h1></h1> <span> <div></div> </span> <article /> </div> }; let expected = Node::Open { name: Value::Text("div".to_string()), attributes: None, children: Some(vec![ Node::Open { name: Value::Text("h1".to_string()), attributes: None, children: None, }, Node::Open { name: Value::Text("span".to_string()), attributes: None, children: Some(vec![Node::Open { name: Value::Text("div".to_string()), attributes: None, children: None, }]), }, Node::SelfClosing { name: Value::Text("article".to_string()), attributes: None, }, ]), }; assert_eq_nodes(code, expected) } #[test] fn it_should_parse_code_in_a_node() -> Result<()> { let code = quote! 
{ <div> { if foo { &"blah" } else { &"foobar" } } </div> }; let expected = Node::Open { name: Value::Text("div".to_string()), attributes: None, children: Some(vec![Node::Code(quote! { if foo { &"blah" } else { &"foobar" } })]), }; assert_eq_nodes(code, expected) } #[test] fn it_should_parse_text_in_a_node() -> Result<()> { let code = quote! { <h1> Upgrade today! </h1> }; let expected = Node::Open { name: Value::Text("h1".to_string()), attributes: None, children: Some(vec![Node::Text("Upgrade today!".to_string())]), }; assert_eq_nodes(code, expected) } #[test] fn it_should_parse_text_and_bracket_in_a_node() -> Result<()> { let code = quote! { <h1> (Upgrade today!) </h1> }; let expected = Node::Open { name: Value::Text("h1".to_string()), attributes: None, children: Some(vec![Node::Text("(Upgrade today!)".to_string())]), }; assert_eq_nodes(code, expected) } #[test] fn it_should_parse_text_and_bracket_in_a_node_complex_example() -> Result<()> { let code = quote! { <h1> You should (Upgrade (to something new) today! + = 5 (maybe)) if you want to </h1> }; let expected = Node::Open { name: Value::Text("h1".to_string()), attributes: None, children: Some(vec![Node::Text( "You should (Upgrade (to something new) today! + = 5 (maybe)) if you want to" .to_string(), )]), }; assert_eq_nodes(code, expected) } #[test] fn it_should_parse_text_with_quotes_in_a_node() -> Result<()> { let code = quote! { <h1> "Upgrade today!" </h1> }; let expected = Node::Open { name: Value::Text("h1".to_string()), attributes: None, children: Some(vec![Node::Text("Upgrade today!".to_string())]), }; assert_eq_nodes(code, expected) } #[cfg(test)] mod root_fragments { use super::*; use ::quote::quote; #[test] fn it_should_return_fragment_if_content_after_html_in_root() -> Result<()> { let code = quote! { <h1> "Upgrade today!" </h1> blah blah }; let expected = Node::Fragment { children: vec![ Node::Open { name: Value::Text("h1".to_string()), attributes: None, children: Some(vec![Node::Text("Upgrade today!".to_string())]), }, Node::Text("blah blah".to_string()), ], }; assert_eq_nodes(code, expected) } #[test] fn it_should_return_fragment_if_html_after_content_in_root() -> Result<()> { let code = quote! { blah blah <h1> "Upgrade today!" </h1> }; let expected = Node::Fragment { children: vec![ Node::Text("blah blah".to_string()), Node::Open { name: Value::Text("h1".to_string()), attributes: None, children: Some(vec![Node::Text("Upgrade today!".to_string())]), }, ], }; assert_eq_nodes(code, expected) } } fn assert_eq_nodes(tokens: TokenStream, expected_nodes: Node) -> Result<()> { let nodes = parse(tokens.into())?; assert_eq!(nodes, expected_nodes); Ok(()) } }
28.1963
98
0.494405
08a42c6c221177cf5e6e3638d77825c9e839b82c
3,102
use bevy::math::{Mat4, Vec3};
use bevy_mod_raycast::Ray3d;
use criterion::{black_box, criterion_group, criterion_main, Criterion};

fn ptoxznorm(p: u32, size: u32) -> (f32, f32) {
    let ij = (p / (size), p % (size));
    (ij.0 as f32 / size as f32, ij.1 as f32 / size as f32)
}

struct SimpleMesh {
    positions: Vec<[f32; 3]>,
    normals: Vec<[f32; 3]>,
    indices: Vec<u32>,
}

fn mesh_creation(vertices_per_side: u32) -> SimpleMesh {
    let mut positions = Vec::new();
    let mut normals = Vec::new();
    for p in 0..vertices_per_side.pow(2) {
        let xz = ptoxznorm(p, vertices_per_side);
        positions.push([xz.0 - 0.5, 0.0, xz.1 - 0.5]);
        normals.push([0.0, 1.0, 0.0]);
    }

    let mut indices = vec![];
    for p in 0..vertices_per_side.pow(2) {
        if p % (vertices_per_side) != vertices_per_side - 1
            && p / (vertices_per_side) != vertices_per_side - 1
        {
            indices.extend_from_slice(&[p, p + 1, p + vertices_per_side]);
            indices.extend_from_slice(&[p + vertices_per_side, p + 1, p + vertices_per_side + 1]);
        }
    }

    SimpleMesh {
        positions,
        normals,
        indices,
    }
}

fn ray_mesh_intersection(c: &mut Criterion) {
    let mut group = c.benchmark_group("ray_mesh_intersection");
    group.warm_up_time(std::time::Duration::from_millis(500));

    for vertices_per_side in [10_u32, 100, 1000] {
        group.bench_function(format!("{}_vertices", vertices_per_side.pow(2)), |b| {
            let ray = Ray3d::new(Vec3::new(0.0, 1.0, 0.0), Vec3::new(0.0, -1.0, 0.0));
            let mesh_to_world = Mat4::IDENTITY;
            let mesh = mesh_creation(vertices_per_side);

            b.iter(|| {
                black_box(bevy_mod_raycast::ray_mesh_intersection(
                    &mesh_to_world,
                    &mesh.positions,
                    Some(&mesh.normals),
                    &ray,
                    Some(&mesh.indices),
                ));
            });
        });
    }
}

fn ray_mesh_intersection_no_intersection(c: &mut Criterion) {
    let mut group = c.benchmark_group("ray_mesh_intersection_no_intersection");
    group.warm_up_time(std::time::Duration::from_millis(500));

    for vertices_per_side in [10_u32, 100, 1000] {
        group.bench_function(format!("{}_vertices", (vertices_per_side).pow(2)), |b| {
            let ray = Ray3d::new(Vec3::new(0.0, 1.0, 0.0), Vec3::new(1.0, 0.0, 0.0));
            let mesh_to_world = Mat4::IDENTITY;
            let mesh = mesh_creation(vertices_per_side);

            b.iter(|| {
                black_box(bevy_mod_raycast::ray_mesh_intersection(
                    &mesh_to_world,
                    &mesh.positions,
                    Some(&mesh.normals),
                    &ray,
                    Some(&mesh.indices),
                ));
            });
        });
    }
}

criterion_group!(
    benches,
    ray_mesh_intersection,
    ray_mesh_intersection_no_intersection
);
criterion_main!(benches);
33
99
0.544487
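A quick sanity check on `mesh_creation` above: an n×n vertex grid has (n-1)² cells and two triangles per cell, so the index buffer must hold 6(n-1)² entries. Sketched as a unit test one could drop into this bench file:

let mesh = mesh_creation(10);
assert_eq!(mesh.positions.len(), 100); // 10 * 10 vertices
assert_eq!(mesh.indices.len(), 6 * 9 * 9); // 2 triangles * 3 indices per cell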
feb441e2270a3641ebce2e3894643e18a0891795
1,587
use ckb_vm_definitions::instructions as insts;

use super::utils::{funct3, funct7, opcode, rd, rs1, rs2};
use super::{set_instruction_length_4, Instruction, Register, Rtype};

pub fn factory<R: Register>(instruction_bits: u32, _: u32) -> Option<Instruction> {
    let bit_length = R::BITS;
    if bit_length != 32 && bit_length != 64 {
        return None;
    }
    let rv64 = bit_length == 64;
    if funct7(instruction_bits) != 0b_0000001 {
        return None;
    }
    let inst_opt = match opcode(instruction_bits) {
        0b_0110011 => match funct3(instruction_bits) {
            0b_000 => Some(insts::OP_MUL),
            0b_001 => Some(insts::OP_MULH),
            0b_010 => Some(insts::OP_MULHSU),
            0b_011 => Some(insts::OP_MULHU),
            0b_100 => Some(insts::OP_DIV),
            0b_101 => Some(insts::OP_DIVU),
            0b_110 => Some(insts::OP_REM),
            0b_111 => Some(insts::OP_REMU),
            _ => None,
        },
        0b_0111011 if rv64 => match funct3(instruction_bits) {
            0b_000 => Some(insts::OP_MULW),
            0b_100 => Some(insts::OP_DIVW),
            0b_101 => Some(insts::OP_DIVUW),
            0b_110 => Some(insts::OP_REMW),
            0b_111 => Some(insts::OP_REMUW),
            _ => None,
        },
        _ => None,
    };
    inst_opt
        .map(|inst| {
            Rtype::new(
                inst,
                rd(instruction_bits),
                rs1(instruction_bits),
                rs2(instruction_bits),
            )
            .0
        })
        .map(set_instruction_length_4)
}
32.387755
83
0.532451
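A worked encoding for the decoder above: `mul t0, t1, t2` packs funct7=0b0000001, rs2=7 (x7), rs1=6 (x6), funct3=0b000, rd=5 (x5), opcode=0b0110011, which assembles to 0x027302B3. A sketch, assuming `u64` implements the crate's `Register` trait as it does in ckb-vm:

// 0b_0000001_00111_00110_000_00101_0110011 == 0x027302B3 (mul x5, x6, x7)
let inst = factory::<u64>(0x027302B3, 0);
assert!(inst.is_some()); // decodes to insts::OP_MUL with rd=5, rs1=6, rs2=7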
1e559c70f2ba73bbd2d24ceb299e4465e66f9460
139,671
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. #[allow(clippy::unnecessary_wraps)] pub fn parse_batch_check_layer_availability_error( response: &http::Response<bytes::Bytes>, ) -> std::result::Result< crate::output::BatchCheckLayerAvailabilityOutput, crate::error::BatchCheckLayerAvailabilityError, > { let generic = crate::json_deser::parse_generic_error(&response) .map_err(crate::error::BatchCheckLayerAvailabilityError::unhandled)?; let error_code = match generic.code() { Some(code) => code, None => { return Err(crate::error::BatchCheckLayerAvailabilityError::unhandled( generic, )) } }; Err(match error_code { "InvalidParameterException" => crate::error::BatchCheckLayerAvailabilityError { meta: generic, kind: crate::error::BatchCheckLayerAvailabilityErrorKind::InvalidParameterException({ #[allow(unused_mut)] let mut output = crate::error::invalid_parameter_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_invalid_parameter_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::BatchCheckLayerAvailabilityError::unhandled)?; output.build() }), }, "RepositoryNotFoundException" => { crate::error::BatchCheckLayerAvailabilityError { meta: generic, kind: crate::error::BatchCheckLayerAvailabilityErrorKind::RepositoryNotFoundException( { #[allow(unused_mut)] let mut output = crate::error::repository_not_found_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_repository_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::BatchCheckLayerAvailabilityError::unhandled)?; output.build() }, ), } } "ServerException" => crate::error::BatchCheckLayerAvailabilityError { meta: generic, kind: crate::error::BatchCheckLayerAvailabilityErrorKind::ServerException({ #[allow(unused_mut)] let mut output = crate::error::server_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_server_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::BatchCheckLayerAvailabilityError::unhandled)?; output.build() }), }, _ => crate::error::BatchCheckLayerAvailabilityError::generic(generic), }) } #[allow(clippy::unnecessary_wraps)] pub fn parse_batch_check_layer_availability_response( response: &http::Response<bytes::Bytes>, ) -> std::result::Result< crate::output::BatchCheckLayerAvailabilityOutput, crate::error::BatchCheckLayerAvailabilityError, > { Ok({ #[allow(unused_mut)] let mut output = crate::output::batch_check_layer_availability_output::Builder::default(); let _ = response; output = crate::json_deser::deser_operation_batch_check_layer_availability( response.body().as_ref(), output, ) .map_err(crate::error::BatchCheckLayerAvailabilityError::unhandled)?; output.build() }) } #[allow(clippy::unnecessary_wraps)] pub fn parse_batch_delete_image_error( response: &http::Response<bytes::Bytes>, ) -> std::result::Result<crate::output::BatchDeleteImageOutput, crate::error::BatchDeleteImageError> { let generic = crate::json_deser::parse_generic_error(&response) .map_err(crate::error::BatchDeleteImageError::unhandled)?; let error_code = match generic.code() { Some(code) => code, None => return Err(crate::error::BatchDeleteImageError::unhandled(generic)), }; Err(match error_code { "InvalidParameterException" => crate::error::BatchDeleteImageError { meta: generic, kind: crate::error::BatchDeleteImageErrorKind::InvalidParameterException({ #[allow(unused_mut)] let mut output = 
                    crate::error::invalid_parameter_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_invalid_parameter_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::BatchDeleteImageError::unhandled)?;
                output.build()
            }),
        },
        "RepositoryNotFoundException" => crate::error::BatchDeleteImageError {
            meta: generic,
            kind: crate::error::BatchDeleteImageErrorKind::RepositoryNotFoundException({
                #[allow(unused_mut)]
                let mut output = crate::error::repository_not_found_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_repository_not_found_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::BatchDeleteImageError::unhandled)?;
                output.build()
            }),
        },
        "ServerException" => crate::error::BatchDeleteImageError {
            meta: generic,
            kind: crate::error::BatchDeleteImageErrorKind::ServerException({
                #[allow(unused_mut)]
                let mut output = crate::error::server_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_server_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::BatchDeleteImageError::unhandled)?;
                output.build()
            }),
        },
        _ => crate::error::BatchDeleteImageError::generic(generic),
    })
}

#[allow(clippy::unnecessary_wraps)]
pub fn parse_batch_delete_image_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::BatchDeleteImageOutput, crate::error::BatchDeleteImageError>
{
    Ok({
        #[allow(unused_mut)]
        let mut output = crate::output::batch_delete_image_output::Builder::default();
        let _ = response;
        output =
            crate::json_deser::deser_operation_batch_delete_image(response.body().as_ref(), output)
                .map_err(crate::error::BatchDeleteImageError::unhandled)?;
        output.build()
    })
}

#[allow(clippy::unnecessary_wraps)]
pub fn parse_batch_get_image_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::BatchGetImageOutput, crate::error::BatchGetImageError> {
    let generic = crate::json_deser::parse_generic_error(&response)
        .map_err(crate::error::BatchGetImageError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::BatchGetImageError::unhandled(generic)),
    };
    Err(match error_code {
        "InvalidParameterException" => crate::error::BatchGetImageError {
            meta: generic,
            kind: crate::error::BatchGetImageErrorKind::InvalidParameterException({
                #[allow(unused_mut)]
                let mut output = crate::error::invalid_parameter_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_invalid_parameter_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::BatchGetImageError::unhandled)?;
                output.build()
            }),
        },
        "RepositoryNotFoundException" => crate::error::BatchGetImageError {
            meta: generic,
            kind: crate::error::BatchGetImageErrorKind::RepositoryNotFoundException({
                #[allow(unused_mut)]
                let mut output = crate::error::repository_not_found_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_repository_not_found_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::BatchGetImageError::unhandled)?;
                output.build()
            }),
        },
        "ServerException" => crate::error::BatchGetImageError {
            meta: generic,
            kind: crate::error::BatchGetImageErrorKind::ServerException({
                #[allow(unused_mut)]
                let mut output = crate::error::server_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_server_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::BatchGetImageError::unhandled)?;
                output.build()
            }),
        },
        _ => crate::error::BatchGetImageError::generic(generic),
    })
}

#[allow(clippy::unnecessary_wraps)]
pub fn parse_batch_get_image_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::BatchGetImageOutput, crate::error::BatchGetImageError> {
    Ok({
        #[allow(unused_mut)]
        let mut output = crate::output::batch_get_image_output::Builder::default();
        let _ = response;
        output =
            crate::json_deser::deser_operation_batch_get_image(response.body().as_ref(), output)
                .map_err(crate::error::BatchGetImageError::unhandled)?;
        output.build()
    })
}

#[allow(clippy::unnecessary_wraps)]
pub fn parse_complete_layer_upload_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::CompleteLayerUploadOutput,
    crate::error::CompleteLayerUploadError,
> {
    let generic = crate::json_deser::parse_generic_error(&response)
        .map_err(crate::error::CompleteLayerUploadError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::CompleteLayerUploadError::unhandled(generic)),
    };
    Err(match error_code {
        "EmptyUploadException" => crate::error::CompleteLayerUploadError {
            meta: generic,
            kind: crate::error::CompleteLayerUploadErrorKind::EmptyUploadException({
                #[allow(unused_mut)]
                let mut output = crate::error::empty_upload_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_empty_upload_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::CompleteLayerUploadError::unhandled)?;
                output.build()
            }),
        },
        "InvalidLayerException" => crate::error::CompleteLayerUploadError {
            meta: generic,
            kind: crate::error::CompleteLayerUploadErrorKind::InvalidLayerException({
                #[allow(unused_mut)]
                let mut output = crate::error::invalid_layer_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_invalid_layer_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::CompleteLayerUploadError::unhandled)?;
                output.build()
            }),
        },
        "InvalidParameterException" => crate::error::CompleteLayerUploadError {
            meta: generic,
            kind: crate::error::CompleteLayerUploadErrorKind::InvalidParameterException({
                #[allow(unused_mut)]
                let mut output = crate::error::invalid_parameter_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_invalid_parameter_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::CompleteLayerUploadError::unhandled)?;
                output.build()
            }),
        },
        "KmsException" => crate::error::CompleteLayerUploadError {
            meta: generic,
            kind: crate::error::CompleteLayerUploadErrorKind::KmsException({
                #[allow(unused_mut)]
                let mut output = crate::error::kms_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_kms_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::CompleteLayerUploadError::unhandled)?;
                output.build()
            }),
        },
        "LayerAlreadyExistsException" => crate::error::CompleteLayerUploadError {
            meta: generic,
            kind: crate::error::CompleteLayerUploadErrorKind::LayerAlreadyExistsException({
                #[allow(unused_mut)]
                let mut output = crate::error::layer_already_exists_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_layer_already_exists_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::CompleteLayerUploadError::unhandled)?;
                output.build()
            }),
        },
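        // NOTE: every `parse_*_error` function in this generated module repeats
        // the same shape — deserialize the generic JSON error envelope, match on
        // the modeled error code, rebuild the typed error variant from the
        // response body, and fall back to `*Error::generic` for unrecognized
        // codes. A minimal sketch of that dispatch, with hypothetical names for
        // orientation only (not part of the generated API):
        //
        //     let generic = crate::json_deser::parse_generic_error(&response)
        //         .map_err(OperationError::unhandled)?;
        //     Err(match generic.code() {
        //         Some("ModeledException") => /* deserialize the typed variant */
        //             build_modeled_exception(response.body().as_ref())?,
        //         _ => OperationError::generic(generic),
        //     })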
"LayerPartTooSmallException" => crate::error::CompleteLayerUploadError { meta: generic, kind: crate::error::CompleteLayerUploadErrorKind::LayerPartTooSmallException({ #[allow(unused_mut)] let mut output = crate::error::layer_part_too_small_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_layer_part_too_small_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::CompleteLayerUploadError::unhandled)?; output.build() }), }, "RepositoryNotFoundException" => crate::error::CompleteLayerUploadError { meta: generic, kind: crate::error::CompleteLayerUploadErrorKind::RepositoryNotFoundException({ #[allow(unused_mut)] let mut output = crate::error::repository_not_found_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_repository_not_found_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::CompleteLayerUploadError::unhandled)?; output.build() }), }, "ServerException" => crate::error::CompleteLayerUploadError { meta: generic, kind: crate::error::CompleteLayerUploadErrorKind::ServerException({ #[allow(unused_mut)] let mut output = crate::error::server_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_server_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::CompleteLayerUploadError::unhandled)?; output.build() }), }, "UploadNotFoundException" => crate::error::CompleteLayerUploadError { meta: generic, kind: crate::error::CompleteLayerUploadErrorKind::UploadNotFoundException({ #[allow(unused_mut)] let mut output = crate::error::upload_not_found_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_upload_not_found_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::CompleteLayerUploadError::unhandled)?; output.build() }), }, _ => crate::error::CompleteLayerUploadError::generic(generic), }) } #[allow(clippy::unnecessary_wraps)] pub fn parse_complete_layer_upload_response( response: &http::Response<bytes::Bytes>, ) -> std::result::Result< crate::output::CompleteLayerUploadOutput, crate::error::CompleteLayerUploadError, > { Ok({ #[allow(unused_mut)] let mut output = crate::output::complete_layer_upload_output::Builder::default(); let _ = response; output = crate::json_deser::deser_operation_complete_layer_upload( response.body().as_ref(), output, ) .map_err(crate::error::CompleteLayerUploadError::unhandled)?; output.build() }) } #[allow(clippy::unnecessary_wraps)] pub fn parse_create_repository_error( response: &http::Response<bytes::Bytes>, ) -> std::result::Result<crate::output::CreateRepositoryOutput, crate::error::CreateRepositoryError> { let generic = crate::json_deser::parse_generic_error(&response) .map_err(crate::error::CreateRepositoryError::unhandled)?; let error_code = match generic.code() { Some(code) => code, None => return Err(crate::error::CreateRepositoryError::unhandled(generic)), }; Err(match error_code { "InvalidParameterException" => crate::error::CreateRepositoryError { meta: generic, kind: crate::error::CreateRepositoryErrorKind::InvalidParameterException({ #[allow(unused_mut)] let mut output = crate::error::invalid_parameter_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_invalid_parameter_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::CreateRepositoryError::unhandled)?; output.build() }), }, "InvalidTagParameterException" => 
crate::error::CreateRepositoryError { meta: generic, kind: crate::error::CreateRepositoryErrorKind::InvalidTagParameterException({ #[allow(unused_mut)] let mut output = crate::error::invalid_tag_parameter_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_invalid_tag_parameter_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::CreateRepositoryError::unhandled)?; output.build() }), }, "KmsException" => crate::error::CreateRepositoryError { meta: generic, kind: crate::error::CreateRepositoryErrorKind::KmsException({ #[allow(unused_mut)] let mut output = crate::error::kms_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_kms_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::CreateRepositoryError::unhandled)?; output.build() }), }, "LimitExceededException" => crate::error::CreateRepositoryError { meta: generic, kind: crate::error::CreateRepositoryErrorKind::LimitExceededException({ #[allow(unused_mut)] let mut output = crate::error::limit_exceeded_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::CreateRepositoryError::unhandled)?; output.build() }), }, "RepositoryAlreadyExistsException" => { crate::error::CreateRepositoryError { meta: generic, kind: crate::error::CreateRepositoryErrorKind::RepositoryAlreadyExistsException({ #[allow(unused_mut)] let mut output = crate::error::repository_already_exists_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_repository_already_exists_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::CreateRepositoryError::unhandled)?; output.build() }), } } "ServerException" => crate::error::CreateRepositoryError { meta: generic, kind: crate::error::CreateRepositoryErrorKind::ServerException({ #[allow(unused_mut)] let mut output = crate::error::server_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_server_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::CreateRepositoryError::unhandled)?; output.build() }), }, "TooManyTagsException" => crate::error::CreateRepositoryError { meta: generic, kind: crate::error::CreateRepositoryErrorKind::TooManyTagsException({ #[allow(unused_mut)] let mut output = crate::error::too_many_tags_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_too_many_tags_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::CreateRepositoryError::unhandled)?; output.build() }), }, _ => crate::error::CreateRepositoryError::generic(generic), }) } #[allow(clippy::unnecessary_wraps)] pub fn parse_create_repository_response( response: &http::Response<bytes::Bytes>, ) -> std::result::Result<crate::output::CreateRepositoryOutput, crate::error::CreateRepositoryError> { Ok({ #[allow(unused_mut)] let mut output = crate::output::create_repository_output::Builder::default(); let _ = response; output = crate::json_deser::deser_operation_create_repository(response.body().as_ref(), output) .map_err(crate::error::CreateRepositoryError::unhandled)?; output.build() }) } #[allow(clippy::unnecessary_wraps)] pub fn parse_delete_lifecycle_policy_error( response: &http::Response<bytes::Bytes>, ) -> std::result::Result< crate::output::DeleteLifecyclePolicyOutput, crate::error::DeleteLifecyclePolicyError, 
> { let generic = crate::json_deser::parse_generic_error(&response) .map_err(crate::error::DeleteLifecyclePolicyError::unhandled)?; let error_code = match generic.code() { Some(code) => code, None => return Err(crate::error::DeleteLifecyclePolicyError::unhandled(generic)), }; Err(match error_code { "InvalidParameterException" => crate::error::DeleteLifecyclePolicyError { meta: generic, kind: crate::error::DeleteLifecyclePolicyErrorKind::InvalidParameterException({ #[allow(unused_mut)] let mut output = crate::error::invalid_parameter_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_invalid_parameter_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::DeleteLifecyclePolicyError::unhandled)?; output.build() }), }, "LifecyclePolicyNotFoundException" => crate::error::DeleteLifecyclePolicyError { meta: generic, kind: crate::error::DeleteLifecyclePolicyErrorKind::LifecyclePolicyNotFoundException({ #[allow(unused_mut)] let mut output = crate::error::lifecycle_policy_not_found_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_lifecycle_policy_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DeleteLifecyclePolicyError::unhandled)?; output.build() }), }, "RepositoryNotFoundException" => crate::error::DeleteLifecyclePolicyError { meta: generic, kind: crate::error::DeleteLifecyclePolicyErrorKind::RepositoryNotFoundException({ #[allow(unused_mut)] let mut output = crate::error::repository_not_found_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_repository_not_found_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::DeleteLifecyclePolicyError::unhandled)?; output.build() }), }, "ServerException" => crate::error::DeleteLifecyclePolicyError { meta: generic, kind: crate::error::DeleteLifecyclePolicyErrorKind::ServerException({ #[allow(unused_mut)] let mut output = crate::error::server_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_server_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::DeleteLifecyclePolicyError::unhandled)?; output.build() }), }, _ => crate::error::DeleteLifecyclePolicyError::generic(generic), }) } #[allow(clippy::unnecessary_wraps)] pub fn parse_delete_lifecycle_policy_response( response: &http::Response<bytes::Bytes>, ) -> std::result::Result< crate::output::DeleteLifecyclePolicyOutput, crate::error::DeleteLifecyclePolicyError, > { Ok({ #[allow(unused_mut)] let mut output = crate::output::delete_lifecycle_policy_output::Builder::default(); let _ = response; output = crate::json_deser::deser_operation_delete_lifecycle_policy( response.body().as_ref(), output, ) .map_err(crate::error::DeleteLifecyclePolicyError::unhandled)?; output.build() }) } #[allow(clippy::unnecessary_wraps)] pub fn parse_delete_registry_policy_error( response: &http::Response<bytes::Bytes>, ) -> std::result::Result< crate::output::DeleteRegistryPolicyOutput, crate::error::DeleteRegistryPolicyError, > { let generic = crate::json_deser::parse_generic_error(&response) .map_err(crate::error::DeleteRegistryPolicyError::unhandled)?; let error_code = match generic.code() { Some(code) => code, None => return Err(crate::error::DeleteRegistryPolicyError::unhandled(generic)), }; Err(match error_code { "InvalidParameterException" => crate::error::DeleteRegistryPolicyError { meta: generic, kind: 
crate::error::DeleteRegistryPolicyErrorKind::InvalidParameterException({ #[allow(unused_mut)] let mut output = crate::error::invalid_parameter_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_invalid_parameter_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::DeleteRegistryPolicyError::unhandled)?; output.build() }), }, "RegistryPolicyNotFoundException" => { crate::error::DeleteRegistryPolicyError { meta: generic, kind: crate::error::DeleteRegistryPolicyErrorKind::RegistryPolicyNotFoundException( { #[allow(unused_mut)] let mut output = crate::error::registry_policy_not_found_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_registry_policy_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DeleteRegistryPolicyError::unhandled)?; output.build() }, ), } } "ServerException" => crate::error::DeleteRegistryPolicyError { meta: generic, kind: crate::error::DeleteRegistryPolicyErrorKind::ServerException({ #[allow(unused_mut)] let mut output = crate::error::server_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_server_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::DeleteRegistryPolicyError::unhandled)?; output.build() }), }, _ => crate::error::DeleteRegistryPolicyError::generic(generic), }) } #[allow(clippy::unnecessary_wraps)] pub fn parse_delete_registry_policy_response( response: &http::Response<bytes::Bytes>, ) -> std::result::Result< crate::output::DeleteRegistryPolicyOutput, crate::error::DeleteRegistryPolicyError, > { Ok({ #[allow(unused_mut)] let mut output = crate::output::delete_registry_policy_output::Builder::default(); let _ = response; output = crate::json_deser::deser_operation_delete_registry_policy( response.body().as_ref(), output, ) .map_err(crate::error::DeleteRegistryPolicyError::unhandled)?; output.build() }) } #[allow(clippy::unnecessary_wraps)] pub fn parse_delete_repository_error( response: &http::Response<bytes::Bytes>, ) -> std::result::Result<crate::output::DeleteRepositoryOutput, crate::error::DeleteRepositoryError> { let generic = crate::json_deser::parse_generic_error(&response) .map_err(crate::error::DeleteRepositoryError::unhandled)?; let error_code = match generic.code() { Some(code) => code, None => return Err(crate::error::DeleteRepositoryError::unhandled(generic)), }; Err(match error_code { "InvalidParameterException" => crate::error::DeleteRepositoryError { meta: generic, kind: crate::error::DeleteRepositoryErrorKind::InvalidParameterException({ #[allow(unused_mut)] let mut output = crate::error::invalid_parameter_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_invalid_parameter_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::DeleteRepositoryError::unhandled)?; output.build() }), }, "KmsException" => crate::error::DeleteRepositoryError { meta: generic, kind: crate::error::DeleteRepositoryErrorKind::KmsException({ #[allow(unused_mut)] let mut output = crate::error::kms_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_kms_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::DeleteRepositoryError::unhandled)?; output.build() }), }, "RepositoryNotEmptyException" => crate::error::DeleteRepositoryError { meta: generic, kind: crate::error::DeleteRepositoryErrorKind::RepositoryNotEmptyException({ 
#[allow(unused_mut)] let mut output = crate::error::repository_not_empty_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_repository_not_empty_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::DeleteRepositoryError::unhandled)?; output.build() }), }, "RepositoryNotFoundException" => crate::error::DeleteRepositoryError { meta: generic, kind: crate::error::DeleteRepositoryErrorKind::RepositoryNotFoundException({ #[allow(unused_mut)] let mut output = crate::error::repository_not_found_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_repository_not_found_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::DeleteRepositoryError::unhandled)?; output.build() }), }, "ServerException" => crate::error::DeleteRepositoryError { meta: generic, kind: crate::error::DeleteRepositoryErrorKind::ServerException({ #[allow(unused_mut)] let mut output = crate::error::server_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_server_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::DeleteRepositoryError::unhandled)?; output.build() }), }, _ => crate::error::DeleteRepositoryError::generic(generic), }) } #[allow(clippy::unnecessary_wraps)] pub fn parse_delete_repository_response( response: &http::Response<bytes::Bytes>, ) -> std::result::Result<crate::output::DeleteRepositoryOutput, crate::error::DeleteRepositoryError> { Ok({ #[allow(unused_mut)] let mut output = crate::output::delete_repository_output::Builder::default(); let _ = response; output = crate::json_deser::deser_operation_delete_repository(response.body().as_ref(), output) .map_err(crate::error::DeleteRepositoryError::unhandled)?; output.build() }) } #[allow(clippy::unnecessary_wraps)] pub fn parse_delete_repository_policy_error( response: &http::Response<bytes::Bytes>, ) -> std::result::Result< crate::output::DeleteRepositoryPolicyOutput, crate::error::DeleteRepositoryPolicyError, > { let generic = crate::json_deser::parse_generic_error(&response) .map_err(crate::error::DeleteRepositoryPolicyError::unhandled)?; let error_code = match generic.code() { Some(code) => code, None => { return Err(crate::error::DeleteRepositoryPolicyError::unhandled( generic, )) } }; Err(match error_code { "InvalidParameterException" => crate::error::DeleteRepositoryPolicyError { meta: generic, kind: crate::error::DeleteRepositoryPolicyErrorKind::InvalidParameterException({ #[allow(unused_mut)] let mut output = crate::error::invalid_parameter_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_invalid_parameter_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::DeleteRepositoryPolicyError::unhandled)?; output.build() }), }, "RepositoryNotFoundException" => crate::error::DeleteRepositoryPolicyError { meta: generic, kind: crate::error::DeleteRepositoryPolicyErrorKind::RepositoryNotFoundException({ #[allow(unused_mut)] let mut output = crate::error::repository_not_found_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_repository_not_found_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::DeleteRepositoryPolicyError::unhandled)?; output.build() }), }, "RepositoryPolicyNotFoundException" => crate::error::DeleteRepositoryPolicyError { meta: generic, kind: crate::error::DeleteRepositoryPolicyErrorKind::RepositoryPolicyNotFoundException( 
{ #[allow(unused_mut)] let mut output = crate::error::repository_policy_not_found_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_repository_policy_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DeleteRepositoryPolicyError::unhandled)?; output.build() }, ), }, "ServerException" => crate::error::DeleteRepositoryPolicyError { meta: generic, kind: crate::error::DeleteRepositoryPolicyErrorKind::ServerException({ #[allow(unused_mut)] let mut output = crate::error::server_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_server_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::DeleteRepositoryPolicyError::unhandled)?; output.build() }), }, _ => crate::error::DeleteRepositoryPolicyError::generic(generic), }) } #[allow(clippy::unnecessary_wraps)] pub fn parse_delete_repository_policy_response( response: &http::Response<bytes::Bytes>, ) -> std::result::Result< crate::output::DeleteRepositoryPolicyOutput, crate::error::DeleteRepositoryPolicyError, > { Ok({ #[allow(unused_mut)] let mut output = crate::output::delete_repository_policy_output::Builder::default(); let _ = response; output = crate::json_deser::deser_operation_delete_repository_policy( response.body().as_ref(), output, ) .map_err(crate::error::DeleteRepositoryPolicyError::unhandled)?; output.build() }) } #[allow(clippy::unnecessary_wraps)] pub fn parse_describe_images_error( response: &http::Response<bytes::Bytes>, ) -> std::result::Result<crate::output::DescribeImagesOutput, crate::error::DescribeImagesError> { let generic = crate::json_deser::parse_generic_error(&response) .map_err(crate::error::DescribeImagesError::unhandled)?; let error_code = match generic.code() { Some(code) => code, None => return Err(crate::error::DescribeImagesError::unhandled(generic)), }; Err(match error_code { "ImageNotFoundException" => crate::error::DescribeImagesError { meta: generic, kind: crate::error::DescribeImagesErrorKind::ImageNotFoundException({ #[allow(unused_mut)] let mut output = crate::error::image_not_found_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_image_not_found_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::DescribeImagesError::unhandled)?; output.build() }), }, "InvalidParameterException" => crate::error::DescribeImagesError { meta: generic, kind: crate::error::DescribeImagesErrorKind::InvalidParameterException({ #[allow(unused_mut)] let mut output = crate::error::invalid_parameter_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_invalid_parameter_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::DescribeImagesError::unhandled)?; output.build() }), }, "RepositoryNotFoundException" => crate::error::DescribeImagesError { meta: generic, kind: crate::error::DescribeImagesErrorKind::RepositoryNotFoundException({ #[allow(unused_mut)] let mut output = crate::error::repository_not_found_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_repository_not_found_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::DescribeImagesError::unhandled)?; output.build() }), }, "ServerException" => crate::error::DescribeImagesError { meta: generic, kind: crate::error::DescribeImagesErrorKind::ServerException({ #[allow(unused_mut)] let mut output = 
crate::error::server_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_server_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::DescribeImagesError::unhandled)?; output.build() }), }, _ => crate::error::DescribeImagesError::generic(generic), }) } #[allow(clippy::unnecessary_wraps)] pub fn parse_describe_images_response( response: &http::Response<bytes::Bytes>, ) -> std::result::Result<crate::output::DescribeImagesOutput, crate::error::DescribeImagesError> { Ok({ #[allow(unused_mut)] let mut output = crate::output::describe_images_output::Builder::default(); let _ = response; output = crate::json_deser::deser_operation_describe_images(response.body().as_ref(), output) .map_err(crate::error::DescribeImagesError::unhandled)?; output.build() }) } #[allow(clippy::unnecessary_wraps)] pub fn parse_describe_image_scan_findings_error( response: &http::Response<bytes::Bytes>, ) -> std::result::Result< crate::output::DescribeImageScanFindingsOutput, crate::error::DescribeImageScanFindingsError, > { let generic = crate::json_deser::parse_generic_error(&response) .map_err(crate::error::DescribeImageScanFindingsError::unhandled)?; let error_code = match generic.code() { Some(code) => code, None => { return Err(crate::error::DescribeImageScanFindingsError::unhandled( generic, )) } }; Err(match error_code { "ImageNotFoundException" => crate::error::DescribeImageScanFindingsError { meta: generic, kind: crate::error::DescribeImageScanFindingsErrorKind::ImageNotFoundException({ #[allow(unused_mut)] let mut output = crate::error::image_not_found_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_image_not_found_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::DescribeImageScanFindingsError::unhandled)?; output.build() }), }, "InvalidParameterException" => crate::error::DescribeImageScanFindingsError { meta: generic, kind: crate::error::DescribeImageScanFindingsErrorKind::InvalidParameterException({ #[allow(unused_mut)] let mut output = crate::error::invalid_parameter_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_invalid_parameter_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::DescribeImageScanFindingsError::unhandled)?; output.build() }), }, "RepositoryNotFoundException" => crate::error::DescribeImageScanFindingsError { meta: generic, kind: crate::error::DescribeImageScanFindingsErrorKind::RepositoryNotFoundException({ #[allow(unused_mut)] let mut output = crate::error::repository_not_found_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_repository_not_found_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::DescribeImageScanFindingsError::unhandled)?; output.build() }), }, "ScanNotFoundException" => crate::error::DescribeImageScanFindingsError { meta: generic, kind: crate::error::DescribeImageScanFindingsErrorKind::ScanNotFoundException({ #[allow(unused_mut)] let mut output = crate::error::scan_not_found_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_scan_not_found_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::DescribeImageScanFindingsError::unhandled)?; output.build() }), }, "ServerException" => crate::error::DescribeImageScanFindingsError { meta: generic, kind: crate::error::DescribeImageScanFindingsErrorKind::ServerException({ 
#[allow(unused_mut)] let mut output = crate::error::server_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_server_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::DescribeImageScanFindingsError::unhandled)?; output.build() }), }, _ => crate::error::DescribeImageScanFindingsError::generic(generic), }) } #[allow(clippy::unnecessary_wraps)] pub fn parse_describe_image_scan_findings_response( response: &http::Response<bytes::Bytes>, ) -> std::result::Result< crate::output::DescribeImageScanFindingsOutput, crate::error::DescribeImageScanFindingsError, > { Ok({ #[allow(unused_mut)] let mut output = crate::output::describe_image_scan_findings_output::Builder::default(); let _ = response; output = crate::json_deser::deser_operation_describe_image_scan_findings( response.body().as_ref(), output, ) .map_err(crate::error::DescribeImageScanFindingsError::unhandled)?; output.build() }) } #[allow(clippy::unnecessary_wraps)] pub fn parse_describe_registry_error( response: &http::Response<bytes::Bytes>, ) -> std::result::Result<crate::output::DescribeRegistryOutput, crate::error::DescribeRegistryError> { let generic = crate::json_deser::parse_generic_error(&response) .map_err(crate::error::DescribeRegistryError::unhandled)?; let error_code = match generic.code() { Some(code) => code, None => return Err(crate::error::DescribeRegistryError::unhandled(generic)), }; Err(match error_code { "InvalidParameterException" => crate::error::DescribeRegistryError { meta: generic, kind: crate::error::DescribeRegistryErrorKind::InvalidParameterException({ #[allow(unused_mut)] let mut output = crate::error::invalid_parameter_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_invalid_parameter_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::DescribeRegistryError::unhandled)?; output.build() }), }, "ServerException" => crate::error::DescribeRegistryError { meta: generic, kind: crate::error::DescribeRegistryErrorKind::ServerException({ #[allow(unused_mut)] let mut output = crate::error::server_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_server_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::DescribeRegistryError::unhandled)?; output.build() }), }, "ValidationException" => crate::error::DescribeRegistryError { meta: generic, kind: crate::error::DescribeRegistryErrorKind::ValidationException({ #[allow(unused_mut)] let mut output = crate::error::validation_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_validation_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::DescribeRegistryError::unhandled)?; output.build() }), }, _ => crate::error::DescribeRegistryError::generic(generic), }) } #[allow(clippy::unnecessary_wraps)] pub fn parse_describe_registry_response( response: &http::Response<bytes::Bytes>, ) -> std::result::Result<crate::output::DescribeRegistryOutput, crate::error::DescribeRegistryError> { Ok({ #[allow(unused_mut)] let mut output = crate::output::describe_registry_output::Builder::default(); let _ = response; output = crate::json_deser::deser_operation_describe_registry(response.body().as_ref(), output) .map_err(crate::error::DescribeRegistryError::unhandled)?; output.build() }) } #[allow(clippy::unnecessary_wraps)] pub fn parse_describe_repositories_error( response: &http::Response<bytes::Bytes>, ) -> std::result::Result< 
crate::output::DescribeRepositoriesOutput, crate::error::DescribeRepositoriesError, > { let generic = crate::json_deser::parse_generic_error(&response) .map_err(crate::error::DescribeRepositoriesError::unhandled)?; let error_code = match generic.code() { Some(code) => code, None => return Err(crate::error::DescribeRepositoriesError::unhandled(generic)), }; Err(match error_code { "InvalidParameterException" => crate::error::DescribeRepositoriesError { meta: generic, kind: crate::error::DescribeRepositoriesErrorKind::InvalidParameterException({ #[allow(unused_mut)] let mut output = crate::error::invalid_parameter_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_invalid_parameter_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::DescribeRepositoriesError::unhandled)?; output.build() }), }, "RepositoryNotFoundException" => crate::error::DescribeRepositoriesError { meta: generic, kind: crate::error::DescribeRepositoriesErrorKind::RepositoryNotFoundException({ #[allow(unused_mut)] let mut output = crate::error::repository_not_found_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_repository_not_found_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::DescribeRepositoriesError::unhandled)?; output.build() }), }, "ServerException" => crate::error::DescribeRepositoriesError { meta: generic, kind: crate::error::DescribeRepositoriesErrorKind::ServerException({ #[allow(unused_mut)] let mut output = crate::error::server_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_server_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::DescribeRepositoriesError::unhandled)?; output.build() }), }, _ => crate::error::DescribeRepositoriesError::generic(generic), }) } #[allow(clippy::unnecessary_wraps)] pub fn parse_describe_repositories_response( response: &http::Response<bytes::Bytes>, ) -> std::result::Result< crate::output::DescribeRepositoriesOutput, crate::error::DescribeRepositoriesError, > { Ok({ #[allow(unused_mut)] let mut output = crate::output::describe_repositories_output::Builder::default(); let _ = response; output = crate::json_deser::deser_operation_describe_repositories( response.body().as_ref(), output, ) .map_err(crate::error::DescribeRepositoriesError::unhandled)?; output.build() }) } #[allow(clippy::unnecessary_wraps)] pub fn parse_get_authorization_token_error( response: &http::Response<bytes::Bytes>, ) -> std::result::Result< crate::output::GetAuthorizationTokenOutput, crate::error::GetAuthorizationTokenError, > { let generic = crate::json_deser::parse_generic_error(&response) .map_err(crate::error::GetAuthorizationTokenError::unhandled)?; let error_code = match generic.code() { Some(code) => code, None => return Err(crate::error::GetAuthorizationTokenError::unhandled(generic)), }; Err(match error_code { "InvalidParameterException" => crate::error::GetAuthorizationTokenError { meta: generic, kind: crate::error::GetAuthorizationTokenErrorKind::InvalidParameterException({ #[allow(unused_mut)] let mut output = crate::error::invalid_parameter_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_invalid_parameter_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::GetAuthorizationTokenError::unhandled)?; output.build() }), }, "ServerException" => crate::error::GetAuthorizationTokenError { meta: generic, kind: 
crate::error::GetAuthorizationTokenErrorKind::ServerException({ #[allow(unused_mut)] let mut output = crate::error::server_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_server_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::GetAuthorizationTokenError::unhandled)?; output.build() }), }, _ => crate::error::GetAuthorizationTokenError::generic(generic), }) } #[allow(clippy::unnecessary_wraps)] pub fn parse_get_authorization_token_response( response: &http::Response<bytes::Bytes>, ) -> std::result::Result< crate::output::GetAuthorizationTokenOutput, crate::error::GetAuthorizationTokenError, > { Ok({ #[allow(unused_mut)] let mut output = crate::output::get_authorization_token_output::Builder::default(); let _ = response; output = crate::json_deser::deser_operation_get_authorization_token( response.body().as_ref(), output, ) .map_err(crate::error::GetAuthorizationTokenError::unhandled)?; output.build() }) } #[allow(clippy::unnecessary_wraps)] pub fn parse_get_download_url_for_layer_error( response: &http::Response<bytes::Bytes>, ) -> std::result::Result< crate::output::GetDownloadUrlForLayerOutput, crate::error::GetDownloadUrlForLayerError, > { let generic = crate::json_deser::parse_generic_error(&response) .map_err(crate::error::GetDownloadUrlForLayerError::unhandled)?; let error_code = match generic.code() { Some(code) => code, None => { return Err(crate::error::GetDownloadUrlForLayerError::unhandled( generic, )) } }; Err(match error_code { "InvalidParameterException" => crate::error::GetDownloadUrlForLayerError { meta: generic, kind: crate::error::GetDownloadUrlForLayerErrorKind::InvalidParameterException({ #[allow(unused_mut)] let mut output = crate::error::invalid_parameter_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_invalid_parameter_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::GetDownloadUrlForLayerError::unhandled)?; output.build() }), }, "LayerInaccessibleException" => crate::error::GetDownloadUrlForLayerError { meta: generic, kind: crate::error::GetDownloadUrlForLayerErrorKind::LayerInaccessibleException({ #[allow(unused_mut)] let mut output = crate::error::layer_inaccessible_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_layer_inaccessible_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::GetDownloadUrlForLayerError::unhandled)?; output.build() }), }, "LayersNotFoundException" => crate::error::GetDownloadUrlForLayerError { meta: generic, kind: crate::error::GetDownloadUrlForLayerErrorKind::LayersNotFoundException({ #[allow(unused_mut)] let mut output = crate::error::layers_not_found_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_layers_not_found_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::GetDownloadUrlForLayerError::unhandled)?; output.build() }), }, "RepositoryNotFoundException" => crate::error::GetDownloadUrlForLayerError { meta: generic, kind: crate::error::GetDownloadUrlForLayerErrorKind::RepositoryNotFoundException({ #[allow(unused_mut)] let mut output = crate::error::repository_not_found_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_repository_not_found_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::GetDownloadUrlForLayerError::unhandled)?; output.build() }), }, "ServerException" => 
crate::error::GetDownloadUrlForLayerError { meta: generic, kind: crate::error::GetDownloadUrlForLayerErrorKind::ServerException({ #[allow(unused_mut)] let mut output = crate::error::server_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_server_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::GetDownloadUrlForLayerError::unhandled)?; output.build() }), }, _ => crate::error::GetDownloadUrlForLayerError::generic(generic), }) } #[allow(clippy::unnecessary_wraps)] pub fn parse_get_download_url_for_layer_response( response: &http::Response<bytes::Bytes>, ) -> std::result::Result< crate::output::GetDownloadUrlForLayerOutput, crate::error::GetDownloadUrlForLayerError, > { Ok({ #[allow(unused_mut)] let mut output = crate::output::get_download_url_for_layer_output::Builder::default(); let _ = response; output = crate::json_deser::deser_operation_get_download_url_for_layer( response.body().as_ref(), output, ) .map_err(crate::error::GetDownloadUrlForLayerError::unhandled)?; output.build() }) } #[allow(clippy::unnecessary_wraps)] pub fn parse_get_lifecycle_policy_error( response: &http::Response<bytes::Bytes>, ) -> std::result::Result< crate::output::GetLifecyclePolicyOutput, crate::error::GetLifecyclePolicyError, > { let generic = crate::json_deser::parse_generic_error(&response) .map_err(crate::error::GetLifecyclePolicyError::unhandled)?; let error_code = match generic.code() { Some(code) => code, None => return Err(crate::error::GetLifecyclePolicyError::unhandled(generic)), }; Err(match error_code { "InvalidParameterException" => crate::error::GetLifecyclePolicyError { meta: generic, kind: crate::error::GetLifecyclePolicyErrorKind::InvalidParameterException({ #[allow(unused_mut)] let mut output = crate::error::invalid_parameter_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_invalid_parameter_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::GetLifecyclePolicyError::unhandled)?; output.build() }), }, "LifecyclePolicyNotFoundException" => crate::error::GetLifecyclePolicyError { meta: generic, kind: crate::error::GetLifecyclePolicyErrorKind::LifecyclePolicyNotFoundException({ #[allow(unused_mut)] let mut output = crate::error::lifecycle_policy_not_found_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_lifecycle_policy_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::GetLifecyclePolicyError::unhandled)?; output.build() }), }, "RepositoryNotFoundException" => crate::error::GetLifecyclePolicyError { meta: generic, kind: crate::error::GetLifecyclePolicyErrorKind::RepositoryNotFoundException({ #[allow(unused_mut)] let mut output = crate::error::repository_not_found_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_repository_not_found_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::GetLifecyclePolicyError::unhandled)?; output.build() }), }, "ServerException" => crate::error::GetLifecyclePolicyError { meta: generic, kind: crate::error::GetLifecyclePolicyErrorKind::ServerException({ #[allow(unused_mut)] let mut output = crate::error::server_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_server_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::GetLifecyclePolicyError::unhandled)?; output.build() }), }, _ => 
crate::error::GetLifecyclePolicyError::generic(generic), }) } #[allow(clippy::unnecessary_wraps)] pub fn parse_get_lifecycle_policy_response( response: &http::Response<bytes::Bytes>, ) -> std::result::Result< crate::output::GetLifecyclePolicyOutput, crate::error::GetLifecyclePolicyError, > { Ok({ #[allow(unused_mut)] let mut output = crate::output::get_lifecycle_policy_output::Builder::default(); let _ = response; output = crate::json_deser::deser_operation_get_lifecycle_policy( response.body().as_ref(), output, ) .map_err(crate::error::GetLifecyclePolicyError::unhandled)?; output.build() }) } #[allow(clippy::unnecessary_wraps)] pub fn parse_get_lifecycle_policy_preview_error( response: &http::Response<bytes::Bytes>, ) -> std::result::Result< crate::output::GetLifecyclePolicyPreviewOutput, crate::error::GetLifecyclePolicyPreviewError, > { let generic = crate::json_deser::parse_generic_error(&response) .map_err(crate::error::GetLifecyclePolicyPreviewError::unhandled)?; let error_code = match generic.code() { Some(code) => code, None => { return Err(crate::error::GetLifecyclePolicyPreviewError::unhandled( generic, )) } }; Err(match error_code { "InvalidParameterException" => crate::error::GetLifecyclePolicyPreviewError { meta: generic, kind: crate::error::GetLifecyclePolicyPreviewErrorKind::InvalidParameterException({ #[allow(unused_mut)]let mut output = crate::error::invalid_parameter_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_invalid_parameter_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::GetLifecyclePolicyPreviewError::unhandled)?; output.build() })}, "LifecyclePolicyPreviewNotFoundException" => crate::error::GetLifecyclePolicyPreviewError { meta: generic, kind: crate::error::GetLifecyclePolicyPreviewErrorKind::LifecyclePolicyPreviewNotFoundException({ #[allow(unused_mut)]let mut output = crate::error::lifecycle_policy_preview_not_found_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_lifecycle_policy_preview_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::GetLifecyclePolicyPreviewError::unhandled)?; output.build() })}, "RepositoryNotFoundException" => crate::error::GetLifecyclePolicyPreviewError { meta: generic, kind: crate::error::GetLifecyclePolicyPreviewErrorKind::RepositoryNotFoundException({ #[allow(unused_mut)]let mut output = crate::error::repository_not_found_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_repository_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::GetLifecyclePolicyPreviewError::unhandled)?; output.build() })}, "ServerException" => crate::error::GetLifecyclePolicyPreviewError { meta: generic, kind: crate::error::GetLifecyclePolicyPreviewErrorKind::ServerException({ #[allow(unused_mut)]let mut output = crate::error::server_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_server_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::GetLifecyclePolicyPreviewError::unhandled)?; output.build() })}, _ => crate::error::GetLifecyclePolicyPreviewError::generic(generic) }) } #[allow(clippy::unnecessary_wraps)] pub fn parse_get_lifecycle_policy_preview_response( response: &http::Response<bytes::Bytes>, ) -> std::result::Result< crate::output::GetLifecyclePolicyPreviewOutput, crate::error::GetLifecyclePolicyPreviewError, > { Ok({ #[allow(unused_mut)] let mut output = 
crate::output::get_lifecycle_policy_preview_output::Builder::default(); let _ = response; output = crate::json_deser::deser_operation_get_lifecycle_policy_preview( response.body().as_ref(), output, ) .map_err(crate::error::GetLifecyclePolicyPreviewError::unhandled)?; output.build() }) } #[allow(clippy::unnecessary_wraps)] pub fn parse_get_registry_policy_error( response: &http::Response<bytes::Bytes>, ) -> std::result::Result<crate::output::GetRegistryPolicyOutput, crate::error::GetRegistryPolicyError> { let generic = crate::json_deser::parse_generic_error(&response) .map_err(crate::error::GetRegistryPolicyError::unhandled)?; let error_code = match generic.code() { Some(code) => code, None => return Err(crate::error::GetRegistryPolicyError::unhandled(generic)), }; Err(match error_code { "InvalidParameterException" => crate::error::GetRegistryPolicyError { meta: generic, kind: crate::error::GetRegistryPolicyErrorKind::InvalidParameterException({ #[allow(unused_mut)] let mut output = crate::error::invalid_parameter_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_invalid_parameter_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::GetRegistryPolicyError::unhandled)?; output.build() }), }, "RegistryPolicyNotFoundException" => { crate::error::GetRegistryPolicyError { meta: generic, kind: crate::error::GetRegistryPolicyErrorKind::RegistryPolicyNotFoundException({ #[allow(unused_mut)] let mut output = crate::error::registry_policy_not_found_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_registry_policy_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::GetRegistryPolicyError::unhandled)?; output.build() }), } } "ServerException" => crate::error::GetRegistryPolicyError { meta: generic, kind: crate::error::GetRegistryPolicyErrorKind::ServerException({ #[allow(unused_mut)] let mut output = crate::error::server_exception::Builder::default(); let _ = response; output = crate::json_deser::deser_structure_server_exceptionjson_err( response.body().as_ref(), output, ) .map_err(crate::error::GetRegistryPolicyError::unhandled)?; output.build() }), }, _ => crate::error::GetRegistryPolicyError::generic(generic), }) } #[allow(clippy::unnecessary_wraps)] pub fn parse_get_registry_policy_response( response: &http::Response<bytes::Bytes>, ) -> std::result::Result<crate::output::GetRegistryPolicyOutput, crate::error::GetRegistryPolicyError> { Ok({ #[allow(unused_mut)] let mut output = crate::output::get_registry_policy_output::Builder::default(); let _ = response; output = crate::json_deser::deser_operation_get_registry_policy( response.body().as_ref(), output, ) .map_err(crate::error::GetRegistryPolicyError::unhandled)?; output.build() }) } #[allow(clippy::unnecessary_wraps)] pub fn parse_get_repository_policy_error( response: &http::Response<bytes::Bytes>, ) -> std::result::Result< crate::output::GetRepositoryPolicyOutput, crate::error::GetRepositoryPolicyError, > { let generic = crate::json_deser::parse_generic_error(&response) .map_err(crate::error::GetRepositoryPolicyError::unhandled)?; let error_code = match generic.code() { Some(code) => code, None => return Err(crate::error::GetRepositoryPolicyError::unhandled(generic)), }; Err(match error_code { "InvalidParameterException" => crate::error::GetRepositoryPolicyError { meta: generic, kind: crate::error::GetRepositoryPolicyErrorKind::InvalidParameterException({ #[allow(unused_mut)] let mut output = 
crate::error::invalid_parameter_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_invalid_parameter_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::GetRepositoryPolicyError::unhandled)?;
                output.build()
            }),
        },
        "RepositoryNotFoundException" => crate::error::GetRepositoryPolicyError {
            meta: generic,
            kind: crate::error::GetRepositoryPolicyErrorKind::RepositoryNotFoundException({
                #[allow(unused_mut)]
                let mut output = crate::error::repository_not_found_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_repository_not_found_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::GetRepositoryPolicyError::unhandled)?;
                output.build()
            }),
        },
        "RepositoryPolicyNotFoundException" => crate::error::GetRepositoryPolicyError {
            meta: generic,
            kind: crate::error::GetRepositoryPolicyErrorKind::RepositoryPolicyNotFoundException({
                #[allow(unused_mut)]
                let mut output =
                    crate::error::repository_policy_not_found_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_repository_policy_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::GetRepositoryPolicyError::unhandled)?;
                output.build()
            }),
        },
        "ServerException" => crate::error::GetRepositoryPolicyError {
            meta: generic,
            kind: crate::error::GetRepositoryPolicyErrorKind::ServerException({
                #[allow(unused_mut)]
                let mut output = crate::error::server_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_server_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::GetRepositoryPolicyError::unhandled)?;
                output.build()
            }),
        },
        _ => crate::error::GetRepositoryPolicyError::generic(generic),
    })
}

#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_repository_policy_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::GetRepositoryPolicyOutput,
    crate::error::GetRepositoryPolicyError,
> {
    Ok({
        #[allow(unused_mut)]
        let mut output = crate::output::get_repository_policy_output::Builder::default();
        let _ = response;
        output = crate::json_deser::deser_operation_get_repository_policy(
            response.body().as_ref(),
            output,
        )
        .map_err(crate::error::GetRepositoryPolicyError::unhandled)?;
        output.build()
    })
}

#[allow(clippy::unnecessary_wraps)]
pub fn parse_initiate_layer_upload_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::InitiateLayerUploadOutput,
    crate::error::InitiateLayerUploadError,
> {
    let generic = crate::json_deser::parse_generic_error(&response)
        .map_err(crate::error::InitiateLayerUploadError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::InitiateLayerUploadError::unhandled(generic)),
    };
    Err(match error_code {
        "InvalidParameterException" => crate::error::InitiateLayerUploadError {
            meta: generic,
            kind: crate::error::InitiateLayerUploadErrorKind::InvalidParameterException({
                #[allow(unused_mut)]
                let mut output = crate::error::invalid_parameter_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_invalid_parameter_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::InitiateLayerUploadError::unhandled)?;
                output.build()
            }),
        },
        "KmsException" => crate::error::InitiateLayerUploadError {
            meta: generic,
            kind: crate::error::InitiateLayerUploadErrorKind::KmsException({
                #[allow(unused_mut)]
                let mut output = crate::error::kms_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_kms_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::InitiateLayerUploadError::unhandled)?;
                output.build()
            }),
        },
        "RepositoryNotFoundException" => crate::error::InitiateLayerUploadError {
            meta: generic,
            kind: crate::error::InitiateLayerUploadErrorKind::RepositoryNotFoundException({
                #[allow(unused_mut)]
                let mut output = crate::error::repository_not_found_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_repository_not_found_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::InitiateLayerUploadError::unhandled)?;
                output.build()
            }),
        },
        "ServerException" => crate::error::InitiateLayerUploadError {
            meta: generic,
            kind: crate::error::InitiateLayerUploadErrorKind::ServerException({
                #[allow(unused_mut)]
                let mut output = crate::error::server_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_server_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::InitiateLayerUploadError::unhandled)?;
                output.build()
            }),
        },
        _ => crate::error::InitiateLayerUploadError::generic(generic),
    })
}

#[allow(clippy::unnecessary_wraps)]
pub fn parse_initiate_layer_upload_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::InitiateLayerUploadOutput,
    crate::error::InitiateLayerUploadError,
> {
    Ok({
        #[allow(unused_mut)]
        let mut output = crate::output::initiate_layer_upload_output::Builder::default();
        let _ = response;
        output = crate::json_deser::deser_operation_initiate_layer_upload(
            response.body().as_ref(),
            output,
        )
        .map_err(crate::error::InitiateLayerUploadError::unhandled)?;
        output.build()
    })
}

#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_images_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::ListImagesOutput, crate::error::ListImagesError> {
    let generic = crate::json_deser::parse_generic_error(&response)
        .map_err(crate::error::ListImagesError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::ListImagesError::unhandled(generic)),
    };
    Err(match error_code {
        "InvalidParameterException" => crate::error::ListImagesError {
            meta: generic,
            kind: crate::error::ListImagesErrorKind::InvalidParameterException({
                #[allow(unused_mut)]
                let mut output = crate::error::invalid_parameter_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_invalid_parameter_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::ListImagesError::unhandled)?;
                output.build()
            }),
        },
        "RepositoryNotFoundException" => crate::error::ListImagesError {
            meta: generic,
            kind: crate::error::ListImagesErrorKind::RepositoryNotFoundException({
                #[allow(unused_mut)]
                let mut output = crate::error::repository_not_found_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_repository_not_found_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::ListImagesError::unhandled)?;
                output.build()
            }),
        },
        "ServerException" => crate::error::ListImagesError {
            meta: generic,
            kind: crate::error::ListImagesErrorKind::ServerException({
                #[allow(unused_mut)]
                let mut output = crate::error::server_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_server_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::ListImagesError::unhandled)?;
                output.build()
            }),
        },
        _ => crate::error::ListImagesError::generic(generic),
    })
}

#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_images_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::ListImagesOutput, crate::error::ListImagesError> {
    Ok({
        #[allow(unused_mut)]
        let mut output = crate::output::list_images_output::Builder::default();
        let _ = response;
        output = crate::json_deser::deser_operation_list_images(response.body().as_ref(), output)
            .map_err(crate::error::ListImagesError::unhandled)?;
        output.build()
    })
}

#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_tags_for_resource_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::ListTagsForResourceOutput,
    crate::error::ListTagsForResourceError,
> {
    let generic = crate::json_deser::parse_generic_error(&response)
        .map_err(crate::error::ListTagsForResourceError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::ListTagsForResourceError::unhandled(generic)),
    };
    Err(match error_code {
        "InvalidParameterException" => crate::error::ListTagsForResourceError {
            meta: generic,
            kind: crate::error::ListTagsForResourceErrorKind::InvalidParameterException({
                #[allow(unused_mut)]
                let mut output = crate::error::invalid_parameter_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_invalid_parameter_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::ListTagsForResourceError::unhandled)?;
                output.build()
            }),
        },
        "RepositoryNotFoundException" => crate::error::ListTagsForResourceError {
            meta: generic,
            kind: crate::error::ListTagsForResourceErrorKind::RepositoryNotFoundException({
                #[allow(unused_mut)]
                let mut output = crate::error::repository_not_found_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_repository_not_found_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::ListTagsForResourceError::unhandled)?;
                output.build()
            }),
        },
        "ServerException" => crate::error::ListTagsForResourceError {
            meta: generic,
            kind: crate::error::ListTagsForResourceErrorKind::ServerException({
                #[allow(unused_mut)]
                let mut output = crate::error::server_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_server_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::ListTagsForResourceError::unhandled)?;
                output.build()
            }),
        },
        _ => crate::error::ListTagsForResourceError::generic(generic),
    })
}

#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_tags_for_resource_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::ListTagsForResourceOutput,
    crate::error::ListTagsForResourceError,
> {
    Ok({
        #[allow(unused_mut)]
        let mut output = crate::output::list_tags_for_resource_output::Builder::default();
        let _ = response;
        output = crate::json_deser::deser_operation_list_tags_for_resource(
            response.body().as_ref(),
            output,
        )
        .map_err(crate::error::ListTagsForResourceError::unhandled)?;
        output.build()
    })
}

#[allow(clippy::unnecessary_wraps)]
pub fn parse_put_image_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::PutImageOutput, crate::error::PutImageError> {
    let generic = crate::json_deser::parse_generic_error(&response)
        .map_err(crate::error::PutImageError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::PutImageError::unhandled(generic)),
    };
    Err(match error_code {
        "ImageAlreadyExistsException" => crate::error::PutImageError {
            meta: generic,
            kind: crate::error::PutImageErrorKind::ImageAlreadyExistsException({
                #[allow(unused_mut)]
                let mut output = crate::error::image_already_exists_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_image_already_exists_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::PutImageError::unhandled)?;
                output.build()
            }),
        },
        "ImageDigestDoesNotMatchException" => crate::error::PutImageError {
            meta: generic,
            kind: crate::error::PutImageErrorKind::ImageDigestDoesNotMatchException({
                #[allow(unused_mut)]
                let mut output =
                    crate::error::image_digest_does_not_match_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_image_digest_does_not_match_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::PutImageError::unhandled)?;
                output.build()
            }),
        },
        "ImageTagAlreadyExistsException" => {
            crate::error::PutImageError {
                meta: generic,
                kind: crate::error::PutImageErrorKind::ImageTagAlreadyExistsException({
                    #[allow(unused_mut)]
                    let mut output =
                        crate::error::image_tag_already_exists_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_image_tag_already_exists_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::PutImageError::unhandled)?;
                    output.build()
                }),
            }
        }
        "InvalidParameterException" => crate::error::PutImageError {
            meta: generic,
            kind: crate::error::PutImageErrorKind::InvalidParameterException({
                #[allow(unused_mut)]
                let mut output = crate::error::invalid_parameter_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_invalid_parameter_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::PutImageError::unhandled)?;
                output.build()
            }),
        },
        "KmsException" => crate::error::PutImageError {
            meta: generic,
            kind: crate::error::PutImageErrorKind::KmsException({
                #[allow(unused_mut)]
                let mut output = crate::error::kms_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_kms_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::PutImageError::unhandled)?;
                output.build()
            }),
        },
        "LayersNotFoundException" => crate::error::PutImageError {
            meta: generic,
            kind: crate::error::PutImageErrorKind::LayersNotFoundException({
                #[allow(unused_mut)]
                let mut output = crate::error::layers_not_found_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_layers_not_found_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::PutImageError::unhandled)?;
                output.build()
            }),
        },
        "LimitExceededException" => crate::error::PutImageError {
            meta: generic,
            kind: crate::error::PutImageErrorKind::LimitExceededException({
                #[allow(unused_mut)]
                let mut output = crate::error::limit_exceeded_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::PutImageError::unhandled)?;
                output.build()
            }),
        },
        "ReferencedImagesNotFoundException" => crate::error::PutImageError {
            meta: generic,
            kind: crate::error::PutImageErrorKind::ReferencedImagesNotFoundException({
                #[allow(unused_mut)]
                let mut output =
                    crate::error::referenced_images_not_found_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_referenced_images_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::PutImageError::unhandled)?;
                output.build()
            }),
        },
        "RepositoryNotFoundException" => crate::error::PutImageError {
            meta: generic,
            kind: crate::error::PutImageErrorKind::RepositoryNotFoundException({
                #[allow(unused_mut)]
                let mut output = crate::error::repository_not_found_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_repository_not_found_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::PutImageError::unhandled)?;
                output.build()
            }),
        },
        "ServerException" => crate::error::PutImageError {
            meta: generic,
            kind: crate::error::PutImageErrorKind::ServerException({
                #[allow(unused_mut)]
                let mut output = crate::error::server_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_server_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::PutImageError::unhandled)?;
                output.build()
            }),
        },
        _ => crate::error::PutImageError::generic(generic),
    })
}

#[allow(clippy::unnecessary_wraps)]
pub fn parse_put_image_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::PutImageOutput, crate::error::PutImageError> {
    Ok({
        #[allow(unused_mut)]
        let mut output = crate::output::put_image_output::Builder::default();
        let _ = response;
        output = crate::json_deser::deser_operation_put_image(response.body().as_ref(), output)
            .map_err(crate::error::PutImageError::unhandled)?;
        output.build()
    })
}

#[allow(clippy::unnecessary_wraps)]
pub fn parse_put_image_scanning_configuration_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::PutImageScanningConfigurationOutput,
    crate::error::PutImageScanningConfigurationError,
> {
    let generic = crate::json_deser::parse_generic_error(&response)
        .map_err(crate::error::PutImageScanningConfigurationError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        None => {
            return Err(crate::error::PutImageScanningConfigurationError::unhandled(
                generic,
            ))
        }
    };
    Err(match error_code {
        "InvalidParameterException" => {
            crate::error::PutImageScanningConfigurationError {
                meta: generic,
                kind: crate::error::PutImageScanningConfigurationErrorKind::InvalidParameterException(
                    {
                        #[allow(unused_mut)]
                        let mut output =
                            crate::error::invalid_parameter_exception::Builder::default();
                        let _ = response;
                        output = crate::json_deser::deser_structure_invalid_parameter_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::PutImageScanningConfigurationError::unhandled)?;
                        output.build()
                    },
                ),
            }
        }
        "RepositoryNotFoundException" => crate::error::PutImageScanningConfigurationError {
            meta: generic,
            kind: crate::error::PutImageScanningConfigurationErrorKind::RepositoryNotFoundException(
                {
                    #[allow(unused_mut)]
                    let mut output =
                        crate::error::repository_not_found_exception::Builder::default();
                    let _ = response;
                    output =
                        crate::json_deser::deser_structure_repository_not_found_exceptionjson_err(
                            response.body().as_ref(),
                            output,
                        )
                        .map_err(crate::error::PutImageScanningConfigurationError::unhandled)?;
                    output.build()
                },
            ),
        },
        "ServerException" => crate::error::PutImageScanningConfigurationError {
            meta: generic,
            kind: crate::error::PutImageScanningConfigurationErrorKind::ServerException({
                #[allow(unused_mut)]
                let mut output = crate::error::server_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_server_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::PutImageScanningConfigurationError::unhandled)?;
                output.build()
            }),
        },
        _ => crate::error::PutImageScanningConfigurationError::generic(generic),
    })
}

#[allow(clippy::unnecessary_wraps)]
pub fn parse_put_image_scanning_configuration_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::PutImageScanningConfigurationOutput,
    crate::error::PutImageScanningConfigurationError,
> {
    Ok({
        #[allow(unused_mut)]
        let mut output =
            crate::output::put_image_scanning_configuration_output::Builder::default();
        let _ = response;
        output = crate::json_deser::deser_operation_put_image_scanning_configuration(
            response.body().as_ref(),
            output,
        )
        .map_err(crate::error::PutImageScanningConfigurationError::unhandled)?;
        output.build()
    })
}

#[allow(clippy::unnecessary_wraps)]
pub fn parse_put_image_tag_mutability_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::PutImageTagMutabilityOutput,
    crate::error::PutImageTagMutabilityError,
> {
    let generic = crate::json_deser::parse_generic_error(&response)
        .map_err(crate::error::PutImageTagMutabilityError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::PutImageTagMutabilityError::unhandled(generic)),
    };
    Err(match error_code {
        "InvalidParameterException" => crate::error::PutImageTagMutabilityError {
            meta: generic,
            kind: crate::error::PutImageTagMutabilityErrorKind::InvalidParameterException({
                #[allow(unused_mut)]
                let mut output = crate::error::invalid_parameter_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_invalid_parameter_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::PutImageTagMutabilityError::unhandled)?;
                output.build()
            }),
        },
        "RepositoryNotFoundException" => crate::error::PutImageTagMutabilityError {
            meta: generic,
            kind: crate::error::PutImageTagMutabilityErrorKind::RepositoryNotFoundException({
                #[allow(unused_mut)]
                let mut output = crate::error::repository_not_found_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_repository_not_found_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::PutImageTagMutabilityError::unhandled)?;
                output.build()
            }),
        },
        "ServerException" => crate::error::PutImageTagMutabilityError {
            meta: generic,
            kind: crate::error::PutImageTagMutabilityErrorKind::ServerException({
                #[allow(unused_mut)]
                let mut output = crate::error::server_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_server_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::PutImageTagMutabilityError::unhandled)?;
                output.build()
            }),
        },
        _ => crate::error::PutImageTagMutabilityError::generic(generic),
    })
}

#[allow(clippy::unnecessary_wraps)]
pub fn parse_put_image_tag_mutability_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::PutImageTagMutabilityOutput,
    crate::error::PutImageTagMutabilityError,
> {
    Ok({
        #[allow(unused_mut)]
        let mut output = crate::output::put_image_tag_mutability_output::Builder::default();
        let _ = response;
        output = crate::json_deser::deser_operation_put_image_tag_mutability(
            response.body().as_ref(),
            output,
        )
        .map_err(crate::error::PutImageTagMutabilityError::unhandled)?;
        output.build()
    })
}

#[allow(clippy::unnecessary_wraps)]
pub fn parse_put_lifecycle_policy_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::PutLifecyclePolicyOutput,
    crate::error::PutLifecyclePolicyError,
> {
    let generic = crate::json_deser::parse_generic_error(&response)
        .map_err(crate::error::PutLifecyclePolicyError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::PutLifecyclePolicyError::unhandled(generic)),
    };
    Err(match error_code {
        "InvalidParameterException" => crate::error::PutLifecyclePolicyError {
            meta: generic,
            kind: crate::error::PutLifecyclePolicyErrorKind::InvalidParameterException({
                #[allow(unused_mut)]
                let mut output = crate::error::invalid_parameter_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_invalid_parameter_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::PutLifecyclePolicyError::unhandled)?;
                output.build()
            }),
        },
        "RepositoryNotFoundException" => crate::error::PutLifecyclePolicyError {
            meta: generic,
            kind: crate::error::PutLifecyclePolicyErrorKind::RepositoryNotFoundException({
                #[allow(unused_mut)]
                let mut output = crate::error::repository_not_found_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_repository_not_found_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::PutLifecyclePolicyError::unhandled)?;
                output.build()
            }),
        },
        "ServerException" => crate::error::PutLifecyclePolicyError {
            meta: generic,
            kind: crate::error::PutLifecyclePolicyErrorKind::ServerException({
                #[allow(unused_mut)]
                let mut output = crate::error::server_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_server_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::PutLifecyclePolicyError::unhandled)?;
                output.build()
            }),
        },
        _ => crate::error::PutLifecyclePolicyError::generic(generic),
    })
}

#[allow(clippy::unnecessary_wraps)]
pub fn parse_put_lifecycle_policy_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::PutLifecyclePolicyOutput,
    crate::error::PutLifecyclePolicyError,
> {
    Ok({
        #[allow(unused_mut)]
        let mut output = crate::output::put_lifecycle_policy_output::Builder::default();
        let _ = response;
        output = crate::json_deser::deser_operation_put_lifecycle_policy(
            response.body().as_ref(),
            output,
        )
        .map_err(crate::error::PutLifecyclePolicyError::unhandled)?;
        output.build()
    })
}

#[allow(clippy::unnecessary_wraps)]
pub fn parse_put_registry_policy_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::PutRegistryPolicyOutput, crate::error::PutRegistryPolicyError>
{
    let generic = crate::json_deser::parse_generic_error(&response)
        .map_err(crate::error::PutRegistryPolicyError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::PutRegistryPolicyError::unhandled(generic)),
    };
    Err(match error_code {
        "InvalidParameterException" => crate::error::PutRegistryPolicyError {
            meta: generic,
            kind: crate::error::PutRegistryPolicyErrorKind::InvalidParameterException({
                #[allow(unused_mut)]
                let mut output = crate::error::invalid_parameter_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_invalid_parameter_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::PutRegistryPolicyError::unhandled)?;
                output.build()
            }),
        },
        "ServerException" => crate::error::PutRegistryPolicyError {
            meta: generic,
            kind: crate::error::PutRegistryPolicyErrorKind::ServerException({
                #[allow(unused_mut)]
                let mut output = crate::error::server_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_server_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::PutRegistryPolicyError::unhandled)?;
                output.build()
            }),
        },
        _ => crate::error::PutRegistryPolicyError::generic(generic),
    })
}

#[allow(clippy::unnecessary_wraps)]
pub fn parse_put_registry_policy_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::PutRegistryPolicyOutput, crate::error::PutRegistryPolicyError>
{
    Ok({
        #[allow(unused_mut)]
        let mut output = crate::output::put_registry_policy_output::Builder::default();
        let _ = response;
        output = crate::json_deser::deser_operation_put_registry_policy(
            response.body().as_ref(),
            output,
        )
        .map_err(crate::error::PutRegistryPolicyError::unhandled)?;
        output.build()
    })
}

#[allow(clippy::unnecessary_wraps)]
pub fn parse_put_replication_configuration_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::PutReplicationConfigurationOutput,
    crate::error::PutReplicationConfigurationError,
> {
    let generic = crate::json_deser::parse_generic_error(&response)
        .map_err(crate::error::PutReplicationConfigurationError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        None => {
            return Err(crate::error::PutReplicationConfigurationError::unhandled(
                generic,
            ))
        }
    };
    Err(match error_code {
        "InvalidParameterException" => crate::error::PutReplicationConfigurationError {
            meta: generic,
            kind: crate::error::PutReplicationConfigurationErrorKind::InvalidParameterException({
                #[allow(unused_mut)]
                let mut output = crate::error::invalid_parameter_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_invalid_parameter_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::PutReplicationConfigurationError::unhandled)?;
                output.build()
            }),
        },
        "ServerException" => crate::error::PutReplicationConfigurationError {
            meta: generic,
            kind: crate::error::PutReplicationConfigurationErrorKind::ServerException({
                #[allow(unused_mut)]
                let mut output = crate::error::server_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_server_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::PutReplicationConfigurationError::unhandled)?;
                output.build()
            }),
        },
        "ValidationException" => crate::error::PutReplicationConfigurationError {
            meta: generic,
            kind: crate::error::PutReplicationConfigurationErrorKind::ValidationException({
                #[allow(unused_mut)]
                let mut output = crate::error::validation_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_validation_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::PutReplicationConfigurationError::unhandled)?;
                output.build()
            }),
        },
        _ => crate::error::PutReplicationConfigurationError::generic(generic),
    })
}

#[allow(clippy::unnecessary_wraps)]
pub fn parse_put_replication_configuration_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::PutReplicationConfigurationOutput,
    crate::error::PutReplicationConfigurationError,
> {
    Ok({
        #[allow(unused_mut)]
        let mut output = crate::output::put_replication_configuration_output::Builder::default();
        let _ = response;
        output = crate::json_deser::deser_operation_put_replication_configuration(
            response.body().as_ref(),
            output,
        )
        .map_err(crate::error::PutReplicationConfigurationError::unhandled)?;
        output.build()
    })
}

#[allow(clippy::unnecessary_wraps)]
pub fn parse_set_repository_policy_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::SetRepositoryPolicyOutput,
    crate::error::SetRepositoryPolicyError,
> {
    let generic = crate::json_deser::parse_generic_error(&response)
        .map_err(crate::error::SetRepositoryPolicyError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::SetRepositoryPolicyError::unhandled(generic)),
    };
    Err(match error_code {
        "InvalidParameterException" => crate::error::SetRepositoryPolicyError {
            meta: generic,
            kind: crate::error::SetRepositoryPolicyErrorKind::InvalidParameterException({
                #[allow(unused_mut)]
                let mut output = crate::error::invalid_parameter_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_invalid_parameter_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::SetRepositoryPolicyError::unhandled)?;
                output.build()
            }),
        },
        "RepositoryNotFoundException" => crate::error::SetRepositoryPolicyError {
            meta: generic,
            kind: crate::error::SetRepositoryPolicyErrorKind::RepositoryNotFoundException({
                #[allow(unused_mut)]
                let mut output = crate::error::repository_not_found_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_repository_not_found_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::SetRepositoryPolicyError::unhandled)?;
                output.build()
            }),
        },
        "ServerException" => crate::error::SetRepositoryPolicyError {
            meta: generic,
            kind: crate::error::SetRepositoryPolicyErrorKind::ServerException({
                #[allow(unused_mut)]
                let mut output = crate::error::server_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_server_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::SetRepositoryPolicyError::unhandled)?;
                output.build()
            }),
        },
        _ => crate::error::SetRepositoryPolicyError::generic(generic),
    })
}

#[allow(clippy::unnecessary_wraps)]
pub fn parse_set_repository_policy_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::SetRepositoryPolicyOutput,
    crate::error::SetRepositoryPolicyError,
> {
    Ok({
        #[allow(unused_mut)]
        let mut output = crate::output::set_repository_policy_output::Builder::default();
        let _ = response;
        output = crate::json_deser::deser_operation_set_repository_policy(
            response.body().as_ref(),
            output,
        )
        .map_err(crate::error::SetRepositoryPolicyError::unhandled)?;
        output.build()
    })
}

#[allow(clippy::unnecessary_wraps)]
pub fn parse_start_image_scan_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::StartImageScanOutput, crate::error::StartImageScanError> {
    let generic = crate::json_deser::parse_generic_error(&response)
        .map_err(crate::error::StartImageScanError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::StartImageScanError::unhandled(generic)),
    };
    Err(match error_code {
        "ImageNotFoundException" => crate::error::StartImageScanError {
            meta: generic,
            kind: crate::error::StartImageScanErrorKind::ImageNotFoundException({
                #[allow(unused_mut)]
                let mut output = crate::error::image_not_found_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_image_not_found_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::StartImageScanError::unhandled)?;
                output.build()
            }),
        },
        "InvalidParameterException" => crate::error::StartImageScanError {
            meta: generic,
            kind: crate::error::StartImageScanErrorKind::InvalidParameterException({
                #[allow(unused_mut)]
                let mut output = crate::error::invalid_parameter_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_invalid_parameter_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::StartImageScanError::unhandled)?;
                output.build()
            }),
        },
        "LimitExceededException" => crate::error::StartImageScanError {
            meta: generic,
            kind: crate::error::StartImageScanErrorKind::LimitExceededException({
                #[allow(unused_mut)]
                let mut output = crate::error::limit_exceeded_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::StartImageScanError::unhandled)?;
                output.build()
            }),
        },
        "RepositoryNotFoundException" => crate::error::StartImageScanError {
            meta: generic,
            kind: crate::error::StartImageScanErrorKind::RepositoryNotFoundException({
                #[allow(unused_mut)]
                let mut output = crate::error::repository_not_found_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_repository_not_found_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::StartImageScanError::unhandled)?;
                output.build()
            }),
        },
        "ServerException" => crate::error::StartImageScanError {
            meta: generic,
            kind: crate::error::StartImageScanErrorKind::ServerException({
                #[allow(unused_mut)]
                let mut output = crate::error::server_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_server_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::StartImageScanError::unhandled)?;
                output.build()
            }),
        },
        "UnsupportedImageTypeException" => {
            crate::error::StartImageScanError {
                meta: generic,
                kind: crate::error::StartImageScanErrorKind::UnsupportedImageTypeException({
                    #[allow(unused_mut)]
                    let mut output =
                        crate::error::unsupported_image_type_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_unsupported_image_type_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::StartImageScanError::unhandled)?;
                    output.build()
                }),
            }
        }
        _ => crate::error::StartImageScanError::generic(generic),
    })
}

#[allow(clippy::unnecessary_wraps)]
pub fn parse_start_image_scan_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::StartImageScanOutput, crate::error::StartImageScanError> {
    Ok({
        #[allow(unused_mut)]
        let mut output = crate::output::start_image_scan_output::Builder::default();
        let _ = response;
        output =
            crate::json_deser::deser_operation_start_image_scan(response.body().as_ref(), output)
                .map_err(crate::error::StartImageScanError::unhandled)?;
        output.build()
    })
}

#[allow(clippy::unnecessary_wraps)]
pub fn parse_start_lifecycle_policy_preview_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::StartLifecyclePolicyPreviewOutput,
    crate::error::StartLifecyclePolicyPreviewError,
> {
    let generic = crate::json_deser::parse_generic_error(&response)
        .map_err(crate::error::StartLifecyclePolicyPreviewError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        None => {
            return Err(crate::error::StartLifecyclePolicyPreviewError::unhandled(
                generic,
            ))
        }
    };
    Err(match error_code {
        "InvalidParameterException" => crate::error::StartLifecyclePolicyPreviewError {
            meta: generic,
            kind: crate::error::StartLifecyclePolicyPreviewErrorKind::InvalidParameterException({
                #[allow(unused_mut)]
                let mut output = crate::error::invalid_parameter_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_invalid_parameter_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::StartLifecyclePolicyPreviewError::unhandled)?;
                output.build()
            }),
        },
        "LifecyclePolicyNotFoundException" => crate::error::StartLifecyclePolicyPreviewError {
            meta: generic,
            kind: crate::error::StartLifecyclePolicyPreviewErrorKind::LifecyclePolicyNotFoundException({
                #[allow(unused_mut)]
                let mut output = crate::error::lifecycle_policy_not_found_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_lifecycle_policy_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::StartLifecyclePolicyPreviewError::unhandled)?;
                output.build()
            }),
        },
        "LifecyclePolicyPreviewInProgressException" => crate::error::StartLifecyclePolicyPreviewError {
            meta: generic,
            kind: crate::error::StartLifecyclePolicyPreviewErrorKind::LifecyclePolicyPreviewInProgressException({
                #[allow(unused_mut)]
                let mut output = crate::error::lifecycle_policy_preview_in_progress_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_lifecycle_policy_preview_in_progress_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::StartLifecyclePolicyPreviewError::unhandled)?;
                output.build()
            }),
        },
        "RepositoryNotFoundException" => crate::error::StartLifecyclePolicyPreviewError {
            meta: generic,
            kind: crate::error::StartLifecyclePolicyPreviewErrorKind::RepositoryNotFoundException({
                #[allow(unused_mut)]
                let mut output = crate::error::repository_not_found_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_repository_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::StartLifecyclePolicyPreviewError::unhandled)?;
                output.build()
            }),
        },
        "ServerException" => crate::error::StartLifecyclePolicyPreviewError {
            meta: generic,
            kind: crate::error::StartLifecyclePolicyPreviewErrorKind::ServerException({
                #[allow(unused_mut)]
                let mut output = crate::error::server_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_server_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::StartLifecyclePolicyPreviewError::unhandled)?;
                output.build()
            }),
        },
        _ => crate::error::StartLifecyclePolicyPreviewError::generic(generic),
    })
}

#[allow(clippy::unnecessary_wraps)]
pub fn parse_start_lifecycle_policy_preview_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::StartLifecyclePolicyPreviewOutput,
    crate::error::StartLifecyclePolicyPreviewError,
> {
    Ok({
        #[allow(unused_mut)]
        let mut output = crate::output::start_lifecycle_policy_preview_output::Builder::default();
        let _ = response;
        output = crate::json_deser::deser_operation_start_lifecycle_policy_preview(
            response.body().as_ref(),
            output,
        )
        .map_err(crate::error::StartLifecyclePolicyPreviewError::unhandled)?;
        output.build()
    })
}

#[allow(clippy::unnecessary_wraps)]
pub fn parse_tag_resource_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::TagResourceOutput, crate::error::TagResourceError> {
    let generic = crate::json_deser::parse_generic_error(&response)
        .map_err(crate::error::TagResourceError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::TagResourceError::unhandled(generic)),
    };
    Err(match error_code {
        "InvalidParameterException" => crate::error::TagResourceError {
            meta: generic,
            kind: crate::error::TagResourceErrorKind::InvalidParameterException({
                #[allow(unused_mut)]
                let mut output = crate::error::invalid_parameter_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_invalid_parameter_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::TagResourceError::unhandled)?;
                output.build()
            }),
        },
        "InvalidTagParameterException" => crate::error::TagResourceError {
            meta: generic,
            kind: crate::error::TagResourceErrorKind::InvalidTagParameterException({
                #[allow(unused_mut)]
                let mut output = crate::error::invalid_tag_parameter_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_invalid_tag_parameter_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::TagResourceError::unhandled)?;
                output.build()
            }),
        },
        "RepositoryNotFoundException" => crate::error::TagResourceError {
            meta: generic,
            kind: crate::error::TagResourceErrorKind::RepositoryNotFoundException({
                #[allow(unused_mut)]
                let mut output = crate::error::repository_not_found_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_repository_not_found_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::TagResourceError::unhandled)?;
                output.build()
            }),
        },
        "ServerException" => crate::error::TagResourceError {
            meta: generic,
            kind: crate::error::TagResourceErrorKind::ServerException({
                #[allow(unused_mut)]
                let mut output = crate::error::server_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_server_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::TagResourceError::unhandled)?;
                output.build()
            }),
        },
        "TooManyTagsException" => crate::error::TagResourceError {
            meta: generic,
            kind: crate::error::TagResourceErrorKind::TooManyTagsException({
                #[allow(unused_mut)]
                let mut output = crate::error::too_many_tags_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_too_many_tags_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::TagResourceError::unhandled)?;
                output.build()
            }),
        },
        _ => crate::error::TagResourceError::generic(generic),
    })
}

#[allow(clippy::unnecessary_wraps)]
pub fn parse_tag_resource_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::TagResourceOutput, crate::error::TagResourceError> {
    Ok({
        #[allow(unused_mut)]
        let mut output = crate::output::tag_resource_output::Builder::default();
        let _ = response;
        output.build()
    })
}

#[allow(clippy::unnecessary_wraps)]
pub fn parse_untag_resource_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::UntagResourceOutput, crate::error::UntagResourceError> {
    let generic = crate::json_deser::parse_generic_error(&response)
        .map_err(crate::error::UntagResourceError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::UntagResourceError::unhandled(generic)),
    };
    Err(match error_code {
        "InvalidParameterException" => crate::error::UntagResourceError {
            meta: generic,
            kind: crate::error::UntagResourceErrorKind::InvalidParameterException({
                #[allow(unused_mut)]
                let mut output = crate::error::invalid_parameter_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_invalid_parameter_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::UntagResourceError::unhandled)?;
                output.build()
            }),
        },
        "InvalidTagParameterException" => crate::error::UntagResourceError {
            meta: generic,
            kind: crate::error::UntagResourceErrorKind::InvalidTagParameterException({
                #[allow(unused_mut)]
                let mut output = crate::error::invalid_tag_parameter_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_invalid_tag_parameter_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::UntagResourceError::unhandled)?;
                output.build()
            }),
        },
        "RepositoryNotFoundException" => crate::error::UntagResourceError {
            meta: generic,
            kind: crate::error::UntagResourceErrorKind::RepositoryNotFoundException({
                #[allow(unused_mut)]
                let mut output = crate::error::repository_not_found_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_repository_not_found_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::UntagResourceError::unhandled)?;
                output.build()
            }),
        },
        "ServerException" => crate::error::UntagResourceError {
            meta: generic,
            kind: crate::error::UntagResourceErrorKind::ServerException({
                #[allow(unused_mut)]
                let mut output = crate::error::server_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_server_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::UntagResourceError::unhandled)?;
                output.build()
            }),
        },
        "TooManyTagsException" => crate::error::UntagResourceError {
            meta: generic,
            kind: crate::error::UntagResourceErrorKind::TooManyTagsException({
                #[allow(unused_mut)]
                let mut output = crate::error::too_many_tags_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_too_many_tags_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::UntagResourceError::unhandled)?;
                output.build()
            }),
        },
        _ => crate::error::UntagResourceError::generic(generic),
    })
}

#[allow(clippy::unnecessary_wraps)]
pub fn parse_untag_resource_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::UntagResourceOutput, crate::error::UntagResourceError> {
    Ok({
        #[allow(unused_mut)]
        let mut output = crate::output::untag_resource_output::Builder::default();
        let _ = response;
        output.build()
    })
}

#[allow(clippy::unnecessary_wraps)]
pub fn parse_upload_layer_part_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::UploadLayerPartOutput, crate::error::UploadLayerPartError> {
    let generic = crate::json_deser::parse_generic_error(&response)
        .map_err(crate::error::UploadLayerPartError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::UploadLayerPartError::unhandled(generic)),
    };
    Err(match error_code {
        "InvalidLayerPartException" => crate::error::UploadLayerPartError {
            meta: generic,
            kind: crate::error::UploadLayerPartErrorKind::InvalidLayerPartException({
                #[allow(unused_mut)]
                let mut output = crate::error::invalid_layer_part_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_invalid_layer_part_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::UploadLayerPartError::unhandled)?;
                output.build()
            }),
        },
        "InvalidParameterException" => crate::error::UploadLayerPartError {
            meta: generic,
            kind: crate::error::UploadLayerPartErrorKind::InvalidParameterException({
                #[allow(unused_mut)]
                let mut output = crate::error::invalid_parameter_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_invalid_parameter_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::UploadLayerPartError::unhandled)?;
                output.build()
            }),
        },
        "KmsException" => crate::error::UploadLayerPartError {
            meta: generic,
            kind: crate::error::UploadLayerPartErrorKind::KmsException({
                #[allow(unused_mut)]
                let mut output = crate::error::kms_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_kms_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::UploadLayerPartError::unhandled)?;
                output.build()
            }),
        },
        "LimitExceededException" => crate::error::UploadLayerPartError {
            meta: generic,
            kind: crate::error::UploadLayerPartErrorKind::LimitExceededException({
                #[allow(unused_mut)]
                let mut output = crate::error::limit_exceeded_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::UploadLayerPartError::unhandled)?;
                output.build()
            }),
        },
        "RepositoryNotFoundException" => crate::error::UploadLayerPartError {
            meta: generic,
            kind: crate::error::UploadLayerPartErrorKind::RepositoryNotFoundException({
                #[allow(unused_mut)]
                let mut output = crate::error::repository_not_found_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_repository_not_found_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::UploadLayerPartError::unhandled)?;
                output.build()
            }),
        },
        "ServerException" => crate::error::UploadLayerPartError {
            meta: generic,
            kind: crate::error::UploadLayerPartErrorKind::ServerException({
                #[allow(unused_mut)]
                let mut output = crate::error::server_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_server_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::UploadLayerPartError::unhandled)?;
                output.build()
            }),
        },
        "UploadNotFoundException" => crate::error::UploadLayerPartError {
            meta: generic,
            kind: crate::error::UploadLayerPartErrorKind::UploadNotFoundException({
                #[allow(unused_mut)]
                let mut output = crate::error::upload_not_found_exception::Builder::default();
                let _ = response;
                output = crate::json_deser::deser_structure_upload_not_found_exceptionjson_err(
                    response.body().as_ref(),
                    output,
                )
                .map_err(crate::error::UploadLayerPartError::unhandled)?;
                output.build()
            }),
        },
        _ => crate::error::UploadLayerPartError::generic(generic),
    })
}

#[allow(clippy::unnecessary_wraps)]
pub fn parse_upload_layer_part_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::UploadLayerPartOutput, crate::error::UploadLayerPartError> {
    Ok({
        #[allow(unused_mut)]
        let mut output = crate::output::upload_layer_part_output::Builder::default();
        let _ = response;
        output =
            crate::json_deser::deser_operation_upload_layer_part(response.body().as_ref(), output)
                .map_err(crate::error::UploadLayerPartError::unhandled)?;
        output.build()
    })
}
44.438753
220
0.592378
d70fbaa0bc763a7d48196e83ebf5d07f18dce7a3
1,361
use serde_json::Value;

use super::super::schema;
use super::super::validators;

#[allow(missing_copy_implementations)]
pub struct UniqueItems;

impl super::Keyword for UniqueItems {
    fn compile(&self, def: &Value, ctx: &schema::WalkContext<'_>) -> super::KeywordResult {
        let uniq = keyword_key_exists!(def, "uniqueItems");

        if uniq.is_boolean() {
            if uniq.as_bool().unwrap() {
                Ok(Some(Box::new(validators::UniqueItems)))
            } else {
                Ok(None)
            }
        } else {
            Err(schema::SchemaError::Malformed {
                path: ctx.fragment.join("/"),
                detail: "The value of pattern MUST be boolean".to_string(),
            })
        }
    }
}

#[cfg(test)]
use super::super::builder;
#[cfg(test)]
use super::super::scope;
#[cfg(test)]
use serde_json::to_value;

#[test]
fn validate_unique_items() {
    let mut scope = scope::Scope::new();
    let schema = scope
        .compile_and_return(builder::schema(|s| s.unique_items(true)).into_json(), true)
        .ok()
        .unwrap();

    assert_eq!(
        schema
            .validate(&to_value(&[1, 2, 3, 4]).unwrap())
            .is_valid(),
        true
    );
    assert_eq!(
        schema
            .validate(&to_value(&[1, 1, 3, 4]).unwrap())
            .is_valid(),
        false
    );
}
24.745455
91
0.542983
01df9fbb37c83283bd508452e8d4b4bae632591c
524
use crate::util::read_lines;

pub fn run() {
    let lines = read_lines("./src/problems/day1/input.txt").unwrap();
    let entries: Vec<u32> = lines
        .into_iter()
        .map(|entry| entry.unwrap().parse::<u32>().unwrap())
        .collect();

    for i in &entries {
        for j in &entries {
            for k in &entries {
                if i + j + k == 2020 {
                    println!("{} * {} * {} = {}", i, j, k, i * j * k);
                    return;
                }
            }
        }
    }
}
24.952381
70
0.408397
2697f988ac55bdba930e7185c763a334d901e904
10,863
use std::{convert::TryInto, error::Error, fmt, str::FromStr};

use bech32::{self, FromBase32, ToBase32, Variant};

use crate::{kind::*, AddressKind, Network, ZcashAddress};

/// An error while attempting to parse a string as a Zcash address.
#[derive(Debug, PartialEq)]
pub enum ParseError {
    /// The string is an invalid encoding.
    InvalidEncoding,
    /// The string is not a Zcash address.
    NotZcash,
    /// Errors specific to unified addresses.
    Unified(unified::ParseError),
}

impl From<unified::ParseError> for ParseError {
    fn from(e: unified::ParseError) -> Self {
        match e {
            unified::ParseError::InvalidEncoding => Self::InvalidEncoding,
            _ => Self::Unified(e),
        }
    }
}

impl fmt::Display for ParseError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            ParseError::InvalidEncoding => write!(f, "Invalid encoding"),
            ParseError::NotZcash => write!(f, "Not a Zcash address"),
            ParseError::Unified(e) => e.fmt(f),
        }
    }
}

impl Error for ParseError {}

impl FromStr for ZcashAddress {
    type Err = ParseError;

    /// Attempts to parse the given string as a Zcash address.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        // Remove leading and trailing whitespace, to handle copy-paste errors.
        let s = s.trim();

        // Most Zcash addresses use Bech32 or Bech32m, so try those first.
        match bech32::decode(s) {
            Ok((hrp, data, Variant::Bech32m)) => {
                // If we reached this point, the encoding is supposed to be valid Bech32m.
                let data =
                    Vec::<u8>::from_base32(&data).map_err(|_| ParseError::InvalidEncoding)?;

                let net = match hrp.as_str() {
                    unified::MAINNET => Network::Main,
                    unified::TESTNET => Network::Test,
                    unified::REGTEST => Network::Regtest,
                    // We will not define new Bech32m address encodings.
                    _ => {
                        return Err(ParseError::NotZcash);
                    }
                };

                return (hrp.as_str(), &data[..])
                    .try_into()
                    .map(AddressKind::Unified)
                    .map_err(|_| ParseError::InvalidEncoding)
                    .map(|kind| ZcashAddress { net, kind });
            }
            Ok((hrp, data, Variant::Bech32)) => {
                // If we reached this point, the encoding is supposed to be valid Bech32.
                let data =
                    Vec::<u8>::from_base32(&data).map_err(|_| ParseError::InvalidEncoding)?;

                let net = match hrp.as_str() {
                    sapling::MAINNET => Network::Main,
                    sapling::TESTNET => Network::Test,
                    sapling::REGTEST => Network::Regtest,
                    // We will not define new Bech32 address encodings.
                    _ => {
                        return Err(ParseError::NotZcash);
                    }
                };

                return data[..]
                    .try_into()
                    .map(AddressKind::Sapling)
                    .map_err(|_| ParseError::InvalidEncoding)
                    .map(|kind| ZcashAddress { net, kind });
            }
            Err(_) => (),
        }

        // The rest use Base58Check.
        if let Ok(decoded) = bs58::decode(s).with_check(None).into_vec() {
            let net = match decoded[..2].try_into().unwrap() {
                sprout::MAINNET | p2pkh::MAINNET | p2sh::MAINNET => Network::Main,
                sprout::TESTNET | p2pkh::TESTNET | p2sh::TESTNET => Network::Test,
                // We will not define new Base58Check address encodings.
                _ => return Err(ParseError::NotZcash),
            };

            return match decoded[..2].try_into().unwrap() {
                sprout::MAINNET | sprout::TESTNET => {
                    decoded[2..].try_into().map(AddressKind::Sprout)
                }
                p2pkh::MAINNET | p2pkh::TESTNET => decoded[2..].try_into().map(AddressKind::P2pkh),
                p2sh::MAINNET | p2sh::TESTNET => decoded[2..].try_into().map(AddressKind::P2sh),
                _ => unreachable!(),
            }
            .map_err(|_| ParseError::InvalidEncoding)
            .map(|kind| ZcashAddress { kind, net });
        };

        // If it's not valid Bech32 or Base58Check, it's not a Zcash address.
        Err(ParseError::NotZcash)
    }
}

fn encode_bech32m(hrp: &str, data: &[u8]) -> String {
    bech32::encode(hrp, data.to_base32(), Variant::Bech32m).expect("hrp is invalid")
}

fn encode_bech32(hrp: &str, data: &[u8]) -> String {
    bech32::encode(hrp, data.to_base32(), Variant::Bech32).expect("hrp is invalid")
}

fn encode_b58(prefix: [u8; 2], data: &[u8]) -> String {
    let mut bytes = Vec::with_capacity(2 + data.len());
    bytes.extend_from_slice(&prefix);
    bytes.extend_from_slice(data);
    bs58::encode(bytes).with_check().into_string()
}

impl fmt::Display for ZcashAddress {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let encoded = match &self.kind {
            AddressKind::Sprout(data) => encode_b58(
                match self.net {
                    Network::Main => sprout::MAINNET,
                    Network::Test | Network::Regtest => sprout::TESTNET,
                },
                data,
            ),
            AddressKind::Sapling(data) => encode_bech32(
                match self.net {
                    Network::Main => sapling::MAINNET,
                    Network::Test => sapling::TESTNET,
                    Network::Regtest => sapling::REGTEST,
                },
                data,
            ),
            AddressKind::Unified(data) => {
                let hrp = match self.net {
                    Network::Main => unified::MAINNET,
                    Network::Test => unified::TESTNET,
                    Network::Regtest => unified::REGTEST,
                };
                encode_bech32m(hrp, &data.to_bytes(hrp))
            }
            AddressKind::P2pkh(data) => encode_b58(
                match self.net {
                    Network::Main => p2pkh::MAINNET,
                    Network::Test | Network::Regtest => p2pkh::TESTNET,
                },
                data,
            ),
            AddressKind::P2sh(data) => encode_b58(
                match self.net {
                    Network::Main => p2sh::MAINNET,
                    Network::Test | Network::Regtest => p2sh::TESTNET,
                },
                data,
            ),
        };
        write!(f, "{}", encoded)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::kind::unified;

    fn encoding(encoded: &str, decoded: ZcashAddress) {
        assert_eq!(decoded.to_string(), encoded);
        assert_eq!(encoded.parse(), Ok(decoded));
    }

    #[test]
    fn sprout() {
        encoding(
            "zc8E5gYid86n4bo2Usdq1cpr7PpfoJGzttwBHEEgGhGkLUg7SPPVFNB2AkRFXZ7usfphup5426dt1buMmY3fkYeRrQGLa8y",
            ZcashAddress { net: Network::Main, kind: AddressKind::Sprout([0; 64]) },
        );
        encoding(
            "ztJ1EWLKcGwF2S4NA17pAJVdco8Sdkz4AQPxt1cLTEfNuyNswJJc2BbBqYrsRZsp31xbVZwhF7c7a2L9jsF3p3ZwRWpqqyS",
            ZcashAddress { net: Network::Test, kind: AddressKind::Sprout([0; 64]) },
        );
    }

    #[test]
    fn sapling() {
        encoding(
            "zs1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqpq6d8g",
            ZcashAddress {
                net: Network::Main,
                kind: AddressKind::Sapling([0; 43]),
            },
        );
        encoding(
            "ztestsapling1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqfhgwqu",
            ZcashAddress {
                net: Network::Test,
                kind: AddressKind::Sapling([0; 43]),
            },
        );
        encoding(
            "zregtestsapling1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqknpr3m",
            ZcashAddress {
                net: Network::Regtest,
                kind: AddressKind::Sapling([0; 43]),
            },
        );
    }

    #[test]
    fn unified() {
        encoding(
            "u175h4qsgd8gujkevz283ka89ul6r2kr25xvachlt5w5srewdwcjacdtm3ku06jazzwk2klezj3kfy2jc9p65l5fgvjhekmnd4myk2m7xn",
            ZcashAddress {
                net: Network::Main,
                kind: AddressKind::Unified(unified::Address(vec![unified::Receiver::Sapling(
                    [0; 43],
                )])),
            },
        );
        encoding(
            "utest193cmy6pcjrw6cg8rqcxgq6z2095a2mc0hqu0g0gvnlf83em0szx23qtv9722s6qkssz80try4tynp73u9gee3zskye0ztzdz0snrxw7n",
            ZcashAddress {
                net: Network::Test,
                kind: AddressKind::Unified(unified::Address(vec![unified::Receiver::Sapling(
                    [0; 43],
                )])),
            },
        );
        encoding(
            "uregtest1dl4mka5saz8xwnf0pttr637jx6su0nejfhzcz3metcmqyzdgktsmm09ese6ew794xqcyp6476nuspvdvx2xk6gn2euvu7fdmrvwl87zx",
            ZcashAddress {
                net: Network::Regtest,
                kind: AddressKind::Unified(unified::Address(vec![unified::Receiver::Sapling(
                    [0; 43],
                )])),
            },
        );
    }

    #[test]
    fn transparent() {
        encoding(
            "t1Hsc1LR8yKnbbe3twRp88p6vFfC5t7DLbs",
            ZcashAddress {
                net: Network::Main,
                kind: AddressKind::P2pkh([0; 20]),
            },
        );
        encoding(
            "tm9iMLAuYMzJ6jtFLcA7rzUmfreGuKvr7Ma",
            ZcashAddress {
                net: Network::Test,
                kind: AddressKind::P2pkh([0; 20]),
            },
        );
        encoding(
            "t3JZcvsuaXE6ygokL4XUiZSTrQBUoPYFnXJ",
            ZcashAddress {
                net: Network::Main,
                kind: AddressKind::P2sh([0; 20]),
            },
        );
        encoding(
            "t26YoyZ1iPgiMEWL4zGUm74eVWfhyDMXzY2",
            ZcashAddress {
                net: Network::Test,
                kind: AddressKind::P2sh([0; 20]),
            },
        );
    }

    #[test]
    fn whitespace() {
        assert_eq!(
            " t1Hsc1LR8yKnbbe3twRp88p6vFfC5t7DLbs".parse(),
            Ok(ZcashAddress {
                net: Network::Main,
                kind: AddressKind::P2pkh([0; 20])
            }),
        );
        assert_eq!(
            "t1Hsc1LR8yKnbbe3twRp88p6vFfC5t7DLbs ".parse(),
            Ok(ZcashAddress {
                net: Network::Main,
                kind: AddressKind::P2pkh([0; 20])
            }),
        );
        assert_eq!(
            "something t1Hsc1LR8yKnbbe3twRp88p6vFfC5t7DLbs".parse::<ZcashAddress>(),
            Err(ParseError::NotZcash),
        );
    }
}
35.5
128
0.520851
1647d31de8b46585a794d5a05b513a516ceeebdb
302
//Author Josiah Bull, Copyright 2021
//! A collection of small useful helper functions.
use std::time::{SystemTime, UNIX_EPOCH};

///Return the current time, in seconds.
pub fn get_time_seconds() -> i64 {
    SystemTime::now().duration_since(UNIX_EPOCH).expect("Time went backwards").as_secs() as i64
}
37.75
95
0.738411
2148ea8fde8af4d7bdca7131788ea0be178837a1
5,400
use pyo3::prelude::*;
use pyo3::types::{
    PyDate, PyDateAccess, PyDateTime, PyDelta, PyDeltaAccess, PyTime, PyTimeAccess, PyTuple,
    PyTzInfo,
};
use pyo3::wrap_pyfunction;

#[pyfunction]
fn make_date(py: Python, year: i32, month: u8, day: u8) -> PyResult<&PyDate> {
    PyDate::new(py, year, month, day)
}

#[pyfunction]
fn get_date_tuple<'p>(py: Python<'p>, d: &PyDate) -> &'p PyTuple {
    PyTuple::new(
        py,
        &[d.get_year(), d.get_month() as i32, d.get_day() as i32],
    )
}

#[pyfunction]
fn date_from_timestamp(py: Python, timestamp: i64) -> PyResult<&PyDate> {
    PyDate::from_timestamp(py, timestamp)
}

#[pyfunction]
fn make_time<'p>(
    py: Python<'p>,
    hour: u8,
    minute: u8,
    second: u8,
    microsecond: u32,
    tzinfo: Option<&PyTzInfo>,
) -> PyResult<&'p PyTime> {
    PyTime::new(
        py,
        hour,
        minute,
        second,
        microsecond,
        tzinfo.map(|o| o.to_object(py)).as_ref(),
    )
}

#[cfg(not(PyPy))]
#[pyfunction]
fn time_with_fold<'p>(
    py: Python<'p>,
    hour: u8,
    minute: u8,
    second: u8,
    microsecond: u32,
    tzinfo: Option<&PyTzInfo>,
    fold: bool,
) -> PyResult<&'p PyTime> {
    PyTime::new_with_fold(
        py,
        hour,
        minute,
        second,
        microsecond,
        tzinfo.map(|o| o.to_object(py)).as_ref(),
        fold,
    )
}

#[pyfunction]
fn get_time_tuple<'p>(py: Python<'p>, dt: &PyTime) -> &'p PyTuple {
    PyTuple::new(
        py,
        &[
            dt.get_hour() as u32,
            dt.get_minute() as u32,
            dt.get_second() as u32,
            dt.get_microsecond(),
        ],
    )
}

#[cfg(not(PyPy))]
#[pyfunction]
fn get_time_tuple_fold<'p>(py: Python<'p>, dt: &PyTime) -> &'p PyTuple {
    PyTuple::new(
        py,
        &[
            dt.get_hour() as u32,
            dt.get_minute() as u32,
            dt.get_second() as u32,
            dt.get_microsecond(),
            dt.get_fold() as u32,
        ],
    )
}

#[pyfunction]
fn make_delta(py: Python, days: i32, seconds: i32, microseconds: i32) -> PyResult<&PyDelta> {
    PyDelta::new(py, days, seconds, microseconds, true)
}

#[pyfunction]
fn get_delta_tuple<'p>(py: Python<'p>, delta: &PyDelta) -> &'p PyTuple {
    PyTuple::new(
        py,
        &[
            delta.get_days(),
            delta.get_seconds(),
            delta.get_microseconds(),
        ],
    )
}

#[allow(clippy::too_many_arguments)]
#[pyfunction]
fn make_datetime<'p>(
    py: Python<'p>,
    year: i32,
    month: u8,
    day: u8,
    hour: u8,
    minute: u8,
    second: u8,
    microsecond: u32,
    tzinfo: Option<&PyTzInfo>,
) -> PyResult<&'p PyDateTime> {
    PyDateTime::new(
        py,
        year,
        month,
        day,
        hour,
        minute,
        second,
        microsecond,
        tzinfo.map(|o| (o.to_object(py))).as_ref(),
    )
}

#[pyfunction]
fn get_datetime_tuple<'p>(py: Python<'p>, dt: &PyDateTime) -> &'p PyTuple {
    PyTuple::new(
        py,
        &[
            dt.get_year(),
            dt.get_month() as i32,
            dt.get_day() as i32,
            dt.get_hour() as i32,
            dt.get_minute() as i32,
            dt.get_second() as i32,
            dt.get_microsecond() as i32,
        ],
    )
}

#[cfg(not(PyPy))]
#[pyfunction]
fn get_datetime_tuple_fold<'p>(py: Python<'p>, dt: &PyDateTime) -> &'p PyTuple {
    PyTuple::new(
        py,
        &[
            dt.get_year(),
            dt.get_month() as i32,
            dt.get_day() as i32,
            dt.get_hour() as i32,
            dt.get_minute() as i32,
            dt.get_second() as i32,
            dt.get_microsecond() as i32,
            dt.get_fold() as i32,
        ],
    )
}

#[pyfunction]
fn datetime_from_timestamp<'p>(
    py: Python<'p>,
    ts: f64,
    tz: Option<&PyTzInfo>,
) -> PyResult<&'p PyDateTime> {
    PyDateTime::from_timestamp(py, ts, tz)
}

#[pyclass(extends=PyTzInfo)]
pub struct TzClass {}

#[pymethods]
impl TzClass {
    #[new]
    fn new() -> Self {
        TzClass {}
    }

    fn utcoffset<'p>(&self, py: Python<'p>, _dt: &PyDateTime) -> PyResult<&'p PyDelta> {
        PyDelta::new(py, 0, 3600, 0, true)
    }

    fn tzname(&self, _py: Python<'_>, _dt: &PyDateTime) -> PyResult<String> {
        Ok(String::from("+01:00"))
    }

    fn dst(&self, _py: Python<'_>, _dt: &PyDateTime) -> PyResult<Option<&PyDelta>> {
        Ok(None)
    }
}

#[pymodule]
fn datetime(_py: Python<'_>, m: &PyModule) -> PyResult<()> {
    m.add_function(wrap_pyfunction!(make_date, m)?)?;
    m.add_function(wrap_pyfunction!(get_date_tuple, m)?)?;
    m.add_function(wrap_pyfunction!(date_from_timestamp, m)?)?;
    m.add_function(wrap_pyfunction!(make_time, m)?)?;
    m.add_function(wrap_pyfunction!(get_time_tuple, m)?)?;
    m.add_function(wrap_pyfunction!(make_delta, m)?)?;
    m.add_function(wrap_pyfunction!(get_delta_tuple, m)?)?;
    m.add_function(wrap_pyfunction!(make_datetime, m)?)?;
    m.add_function(wrap_pyfunction!(get_datetime_tuple, m)?)?;
    m.add_function(wrap_pyfunction!(datetime_from_timestamp, m)?)?;

    // Functions not supported by PyPy
    #[cfg(not(PyPy))]
    {
        m.add_function(wrap_pyfunction!(time_with_fold, m)?)?;
        m.add_function(wrap_pyfunction!(get_time_tuple_fold, m)?)?;
        m.add_function(wrap_pyfunction!(get_datetime_tuple_fold, m)?)?;
    }

    m.add_class::<TzClass>()?;

    Ok(())
}
23.580786
93
0.558148
56b2ecd19d946938e9f9cef1ea3845f0c3f513bc
11,133
#[doc = "Register `INTENCLR` reader"] pub struct R(crate::R<INTENCLR_SPEC>); impl core::ops::Deref for R { type Target = crate::R<INTENCLR_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<INTENCLR_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<INTENCLR_SPEC>) -> Self { R(reader) } } #[doc = "Register `INTENCLR` writer"] pub struct W(crate::W<INTENCLR_SPEC>); impl core::ops::Deref for W { type Target = crate::W<INTENCLR_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<crate::W<INTENCLR_SPEC>> for W { #[inline(always)] fn from(writer: crate::W<INTENCLR_SPEC>) -> Self { W(writer) } } #[doc = "Write '1' to disable interrupt for END event\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum END_A { #[doc = "0: Read: Disabled"] DISABLED = 0, #[doc = "1: Read: Enabled"] ENABLED = 1, } impl From<END_A> for bool { #[inline(always)] fn from(variant: END_A) -> Self { variant as u8 != 0 } } #[doc = "Field `END` reader - Write '1' to disable interrupt for END event"] pub struct END_R(crate::FieldReader<bool, END_A>); impl END_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { END_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> END_A { match self.bits { false => END_A::DISABLED, true => END_A::ENABLED, } } #[doc = "Checks if the value of the field is `DISABLED`"] #[inline(always)] pub fn is_disabled(&self) -> bool { **self == END_A::DISABLED } #[doc = "Checks if the value of the field is `ENABLED`"] #[inline(always)] pub fn is_enabled(&self) -> bool { **self == END_A::ENABLED } } impl core::ops::Deref for END_R { type Target = crate::FieldReader<bool, END_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Write '1' to disable interrupt for END event\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum END_AW { #[doc = "1: Disable"] CLEAR = 1, } impl From<END_AW> for bool { #[inline(always)] fn from(variant: END_AW) -> Self { variant as u8 != 0 } } #[doc = "Field `END` writer - Write '1' to disable interrupt for END event"] pub struct END_W<'a> { w: &'a mut W, } impl<'a> END_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: END_AW) -> &'a mut W { self.bit(variant.into()) } #[doc = "Disable"] #[inline(always)] pub fn clear(self) -> &'a mut W { self.variant(END_AW::CLEAR) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | (value as u32 & 0x01); self.w } } #[doc = "Write '1' to disable interrupt for RESOLVED event\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum RESOLVED_A { #[doc = "0: Read: Disabled"] DISABLED = 0, #[doc = "1: Read: Enabled"] ENABLED = 1, } impl From<RESOLVED_A> for bool { #[inline(always)] fn from(variant: RESOLVED_A) -> Self { variant as u8 != 0 } } #[doc = "Field `RESOLVED` reader - Write '1' to disable interrupt for RESOLVED event"] pub struct RESOLVED_R(crate::FieldReader<bool, RESOLVED_A>); impl RESOLVED_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { 
RESOLVED_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> RESOLVED_A { match self.bits { false => RESOLVED_A::DISABLED, true => RESOLVED_A::ENABLED, } } #[doc = "Checks if the value of the field is `DISABLED`"] #[inline(always)] pub fn is_disabled(&self) -> bool { **self == RESOLVED_A::DISABLED } #[doc = "Checks if the value of the field is `ENABLED`"] #[inline(always)] pub fn is_enabled(&self) -> bool { **self == RESOLVED_A::ENABLED } } impl core::ops::Deref for RESOLVED_R { type Target = crate::FieldReader<bool, RESOLVED_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Write '1' to disable interrupt for RESOLVED event\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum RESOLVED_AW { #[doc = "1: Disable"] CLEAR = 1, } impl From<RESOLVED_AW> for bool { #[inline(always)] fn from(variant: RESOLVED_AW) -> Self { variant as u8 != 0 } } #[doc = "Field `RESOLVED` writer - Write '1' to disable interrupt for RESOLVED event"] pub struct RESOLVED_W<'a> { w: &'a mut W, } impl<'a> RESOLVED_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: RESOLVED_AW) -> &'a mut W { self.bit(variant.into()) } #[doc = "Disable"] #[inline(always)] pub fn clear(self) -> &'a mut W { self.variant(RESOLVED_AW::CLEAR) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 1)) | ((value as u32 & 0x01) << 1); self.w } } #[doc = "Write '1' to disable interrupt for NOTRESOLVED event\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum NOTRESOLVED_A { #[doc = "0: Read: Disabled"] DISABLED = 0, #[doc = "1: Read: Enabled"] ENABLED = 1, } impl From<NOTRESOLVED_A> for bool { #[inline(always)] fn from(variant: NOTRESOLVED_A) -> Self { variant as u8 != 0 } } #[doc = "Field `NOTRESOLVED` reader - Write '1' to disable interrupt for NOTRESOLVED event"] pub struct NOTRESOLVED_R(crate::FieldReader<bool, NOTRESOLVED_A>); impl NOTRESOLVED_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { NOTRESOLVED_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> NOTRESOLVED_A { match self.bits { false => NOTRESOLVED_A::DISABLED, true => NOTRESOLVED_A::ENABLED, } } #[doc = "Checks if the value of the field is `DISABLED`"] #[inline(always)] pub fn is_disabled(&self) -> bool { **self == NOTRESOLVED_A::DISABLED } #[doc = "Checks if the value of the field is `ENABLED`"] #[inline(always)] pub fn is_enabled(&self) -> bool { **self == NOTRESOLVED_A::ENABLED } } impl core::ops::Deref for NOTRESOLVED_R { type Target = crate::FieldReader<bool, NOTRESOLVED_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Write '1' to disable interrupt for NOTRESOLVED event\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum NOTRESOLVED_AW { #[doc = "1: Disable"] CLEAR = 1, } impl From<NOTRESOLVED_AW> for bool { #[inline(always)] fn from(variant: NOTRESOLVED_AW) -> Self { variant as u8 != 0 } } #[doc = "Field `NOTRESOLVED` writer - Write '1' to disable interrupt for NOTRESOLVED event"] pub struct NOTRESOLVED_W<'a> { w: &'a mut W, } impl<'a> NOTRESOLVED_W<'a> { #[doc = r"Writes 
`variant` to the field"] #[inline(always)] pub fn variant(self, variant: NOTRESOLVED_AW) -> &'a mut W { self.bit(variant.into()) } #[doc = "Disable"] #[inline(always)] pub fn clear(self) -> &'a mut W { self.variant(NOTRESOLVED_AW::CLEAR) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 2)) | ((value as u32 & 0x01) << 2); self.w } } impl R { #[doc = "Bit 0 - Write '1' to disable interrupt for END event"] #[inline(always)] pub fn end(&self) -> END_R { END_R::new((self.bits & 0x01) != 0) } #[doc = "Bit 1 - Write '1' to disable interrupt for RESOLVED event"] #[inline(always)] pub fn resolved(&self) -> RESOLVED_R { RESOLVED_R::new(((self.bits >> 1) & 0x01) != 0) } #[doc = "Bit 2 - Write '1' to disable interrupt for NOTRESOLVED event"] #[inline(always)] pub fn notresolved(&self) -> NOTRESOLVED_R { NOTRESOLVED_R::new(((self.bits >> 2) & 0x01) != 0) } } impl W { #[doc = "Bit 0 - Write '1' to disable interrupt for END event"] #[inline(always)] pub fn end(&mut self) -> END_W { END_W { w: self } } #[doc = "Bit 1 - Write '1' to disable interrupt for RESOLVED event"] #[inline(always)] pub fn resolved(&mut self) -> RESOLVED_W { RESOLVED_W { w: self } } #[doc = "Bit 2 - Write '1' to disable interrupt for NOTRESOLVED event"] #[inline(always)] pub fn notresolved(&mut self) -> NOTRESOLVED_W { NOTRESOLVED_W { w: self } } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "Disable interrupt\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [intenclr](index.html) module"] pub struct INTENCLR_SPEC; impl crate::RegisterSpec for INTENCLR_SPEC { type Ux = u32; } #[doc = "`read()` method returns [intenclr::R](R) reader structure"] impl crate::Readable for INTENCLR_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [intenclr::W](W) writer structure"] impl crate::Writable for INTENCLR_SPEC { type Writer = W; } #[doc = "`reset()` method sets INTENCLR to value 0"] impl crate::Resettable for INTENCLR_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
30.089189
406
0.584119
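The record above is svd2rust-generated register access code; a minimal usage sketch, assuming a hypothetical `periph` handle whose `intenclr` field is a register of the `INTENCLR_SPEC` type shown (the handle and field names are assumptions, not part of the file):

// Disable the END and RESOLVED interrupts by writing '1' to their clear bits;
// for a write-one-to-clear register, leaving the other bits at zero is a no-op.
periph.intenclr.write(|w| w.end().clear().resolved().clear());
// Reading back reports which interrupts are still enabled.
let notresolved_on = periph.intenclr.read().notresolved().is_enabled();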
e402ebf550d526bc887d6964c521880f58c0dfba
3,430
// This file was generated by gir (https://github.com/gtk-rs/gir) // from gir-files (https://github.com/gtk-rs/gir-files) // DO NOT EDIT use cairo; use glib::translate::*; use gtk_sys; use pango; use std::fmt; use std::mem; use PageSetup; glib_wrapper! { pub struct PrintContext(Object<gtk_sys::GtkPrintContext, PrintContextClass>); match fn { get_type => || gtk_sys::gtk_print_context_get_type(), } } impl PrintContext { pub fn create_pango_context(&self) -> Option<pango::Context> { unsafe { from_glib_full(gtk_sys::gtk_print_context_create_pango_context( self.to_glib_none().0, )) } } pub fn create_pango_layout(&self) -> Option<pango::Layout> { unsafe { from_glib_full(gtk_sys::gtk_print_context_create_pango_layout( self.to_glib_none().0, )) } } pub fn get_cairo_context(&self) -> Option<cairo::Context> { unsafe { from_glib_none(gtk_sys::gtk_print_context_get_cairo_context( self.to_glib_none().0, )) } } pub fn get_dpi_x(&self) -> f64 { unsafe { gtk_sys::gtk_print_context_get_dpi_x(self.to_glib_none().0) } } pub fn get_dpi_y(&self) -> f64 { unsafe { gtk_sys::gtk_print_context_get_dpi_y(self.to_glib_none().0) } } pub fn get_hard_margins(&self) -> Option<(f64, f64, f64, f64)> { unsafe { let mut top = mem::MaybeUninit::uninit(); let mut bottom = mem::MaybeUninit::uninit(); let mut left = mem::MaybeUninit::uninit(); let mut right = mem::MaybeUninit::uninit(); let ret = from_glib(gtk_sys::gtk_print_context_get_hard_margins( self.to_glib_none().0, top.as_mut_ptr(), bottom.as_mut_ptr(), left.as_mut_ptr(), right.as_mut_ptr(), )); let top = top.assume_init(); let bottom = bottom.assume_init(); let left = left.assume_init(); let right = right.assume_init(); if ret { Some((top, bottom, left, right)) } else { None } } } pub fn get_height(&self) -> f64 { unsafe { gtk_sys::gtk_print_context_get_height(self.to_glib_none().0) } } pub fn get_page_setup(&self) -> Option<PageSetup> { unsafe { from_glib_none(gtk_sys::gtk_print_context_get_page_setup( self.to_glib_none().0, )) } } pub fn get_pango_fontmap(&self) -> Option<pango::FontMap> { unsafe { from_glib_none(gtk_sys::gtk_print_context_get_pango_fontmap( self.to_glib_none().0, )) } } pub fn get_width(&self) -> f64 { unsafe { gtk_sys::gtk_print_context_get_width(self.to_glib_none().0) } } pub fn set_cairo_context(&self, cr: &cairo::Context, dpi_x: f64, dpi_y: f64) { unsafe { gtk_sys::gtk_print_context_set_cairo_context( self.to_glib_none().0, mut_override(cr.to_glib_none().0), dpi_x, dpi_y, ); } } } impl fmt::Display for PrintContext { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "PrintContext") } }
28.583333
82
0.555685
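A sketch of how the PrintContext wrapper above might be used from a gtk-rs PrintOperation draw-page callback; the callback wiring and the page content are assumptions for illustration:

// `ctx` is the PrintContext GTK hands to a draw-page handler.
fn draw_page(ctx: &PrintContext) {
    if let Some(cr) = ctx.get_cairo_context() {
        // Draw a rule across the printable width near the top of the page.
        cr.move_to(0.0, 10.0);
        cr.line_to(ctx.get_width(), 10.0);
        cr.stroke();
    }
    if let Some(layout) = ctx.create_pango_layout() {
        layout.set_text("Hello, printer");
    }
}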
0328db5a49cf3c90b4ab42f013ae328c056f975b
1,032
// This is part of a set of tests exploring the different ways a // `#[structural_match]` ADT might try to hold a // non-`#[structural_match]` in hidden manner that lets matches // through that we had intended to reject. // // See discussion on rust-lang/rust#62307 and rust-lang/rust#62339 #![warn(indirect_structural_match)] // run-pass struct NoDerive(i32); // This impl makes NoDerive irreflexive. impl PartialEq for NoDerive { fn eq(&self, _: &Self) -> bool { false } } impl Eq for NoDerive { } #[derive(PartialEq, Eq)] struct WrapInline<'a>(&'a &'a NoDerive); const WRAP_DOUBLY_INDIRECT_INLINE: & &WrapInline = & &WrapInline(& & NoDerive(0)); fn main() { match WRAP_DOUBLY_INDIRECT_INLINE { WRAP_DOUBLY_INDIRECT_INLINE => { panic!("WRAP_DOUBLY_INDIRECT_INLINE matched itself"); } //~^ WARN must be annotated with `#[derive(PartialEq, Eq)]` //~| WARN will become a hard error in a future release _ => { println!("WRAP_DOUBLY_INDIRECT_INLINE correctly did not match itself"); } } }
34.4
96
0.696705
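For contrast with the test above, a sketch of the accepted case: when the wrapped type itself derives PartialEq/Eq, matching on the doubly-indirect constant is structurally valid and raises no `indirect_structural_match` warning:

#[derive(PartialEq, Eq)]
struct Derived(i32);

#[derive(PartialEq, Eq)]
struct Wrap<'a>(&'a &'a Derived);

const OK: & &Wrap = & &Wrap(& &Derived(0));

fn is_ok(w: & &Wrap) -> bool {
    match w {
        OK => true, // fine: every type involved is structurally matchable
        _ => false,
    }
}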
dd44f060ff5c294c9f5c186a7cebb77e73844d3b
4,391
// Copyright 2017 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. use std::path::Path; use std::sync::Arc; use tempdir::TempDir; use kvproto::metapb; use kvproto::raft_serverpb::RegionLocalState; use test_raftstore::*; use tikv::import::SSTImporter; use tikv::raftstore::coprocessor::CoprocessorHost; use tikv::raftstore::store::{bootstrap_store, fsm, keys, Engines, Peekable, SnapManager}; use tikv::server::Node; use tikv::storage::{ALL_CFS, CF_RAFT}; use tikv::util::rocksdb_util; use tikv::util::worker::{FutureWorker, Worker}; fn test_bootstrap_idempotent<T: Simulator>(cluster: &mut Cluster<T>) { // assume that a node has bootstrapped the cluster and added the region to PD successfully cluster.add_first_region().unwrap(); // now start another node at the same time; it will be told the cluster is not bootstrapped // and will try to bootstrap with a new region, but this attempt will fail // so the region count stays at 1 cluster.start(); cluster.check_regions_number(1); cluster.shutdown(); sleep_ms(500); cluster.start(); cluster.check_regions_number(1); } #[test] fn test_node_bootstrap_with_prepared_data() { // create a node let pd_client = Arc::new(TestPdClient::new(0, false)); let cfg = new_tikv_config(0); let (_, system) = fsm::create_raft_batch_system(&cfg.raft_store); let simulate_trans = SimulateTransport::new(ChannelTransport::new()); let tmp_path = TempDir::new("test_cluster").unwrap(); let engine = Arc::new( rocksdb_util::new_engine(tmp_path.path().to_str().unwrap(), None, ALL_CFS, None).unwrap(), ); let tmp_path_raft = tmp_path.path().join(Path::new("raft")); let raft_engine = Arc::new( rocksdb_util::new_engine(tmp_path_raft.to_str().unwrap(), None, &[], None).unwrap(), ); let engines = Engines::new(Arc::clone(&engine), Arc::clone(&raft_engine)); let tmp_mgr = TempDir::new("test_cluster").unwrap(); let mut node = Node::new(system, &cfg.server, &cfg.raft_store, Arc::clone(&pd_client)); let snap_mgr = SnapManager::new(tmp_mgr.path().to_str().unwrap(), Some(node.get_sendch())); let pd_worker = FutureWorker::new("test-pd-worker"); let local_reader = Worker::new("test-local-reader"); // assume a node has bootstrapped the cluster and added the region to PD successfully bootstrap_with_first_region(Arc::clone(&pd_client)).unwrap(); // now another node begins bootstrapping at the same time, but panics after preparing the bootstrap // so RocksDB must now contain some prepared bootstrap data bootstrap_store(&engines, 0, 1).unwrap(); let region = node.prepare_bootstrap_cluster(&engines, 1).unwrap(); assert!(engine .get_msg::<metapb::Region>(keys::PREPARE_BOOTSTRAP_KEY) .unwrap() .is_some()); let region_state_key = keys::region_state_key(region.get_id()); assert!(engine .get_msg_cf::<RegionLocalState>(CF_RAFT, &region_state_key) .unwrap() .is_some()); // Create coprocessor. 
let coprocessor_host = CoprocessorHost::new(cfg.coprocessor, node.get_sendch()); let importer = { let dir = tmp_path.path().join("import-sst"); Arc::new(SSTImporter::new(dir).unwrap()) }; // try to restart this node; restarting will clear the prepared bootstrap data node.start( engines, simulate_trans, snap_mgr, pd_worker, local_reader, coprocessor_host, importer, ) .unwrap(); assert!(Arc::clone(&engine) .get_msg::<metapb::Region>(keys::PREPARE_BOOTSTRAP_KEY) .unwrap() .is_none()); assert!(engine .get_msg_cf::<RegionLocalState>(CF_RAFT, &region_state_key) .unwrap() .is_none()); assert_eq!(pd_client.get_regions_number() as u32, 1); node.stop().unwrap(); } #[test] fn test_node_bootstrap_idempotent() { let mut cluster = new_node_cluster(0, 3); test_bootstrap_idempotent(&mut cluster); }
35.991803
98
0.680027
4b3b68d91970576f59b4ce0211aa24e3f222fa06
17,717
use std::{ char, os::raw::c_int, ptr, sync::atomic::{AtomicBool, AtomicPtr, Ordering}, }; use crate::event::{ModifiersState, ScanCode, VirtualKeyCode}; use winapi::{ shared::minwindef::{HKL, HKL__, LPARAM, UINT, WPARAM}, um::winuser, }; fn key_pressed(vkey: c_int) -> bool { unsafe { (winuser::GetKeyState(vkey) & (1 << 15)) == (1 << 15) } } pub fn get_key_mods() -> ModifiersState { let filter_out_altgr = layout_uses_altgr() && key_pressed(winuser::VK_RMENU); let mut mods = ModifiersState::empty(); mods.set(ModifiersState::SHIFT, key_pressed(winuser::VK_SHIFT)); mods.set( ModifiersState::CTRL, key_pressed(winuser::VK_CONTROL) && !filter_out_altgr, ); mods.set( ModifiersState::ALT, key_pressed(winuser::VK_MENU) && !filter_out_altgr, ); mods.set( ModifiersState::LOGO, key_pressed(winuser::VK_LWIN) || key_pressed(winuser::VK_RWIN), ); mods } bitflags! { #[derive(Default)] pub struct ModifiersStateSide: u32 { const LSHIFT = 0b010 << 0; const RSHIFT = 0b001 << 0; const LCTRL = 0b010 << 3; const RCTRL = 0b001 << 3; const LALT = 0b010 << 6; const RALT = 0b001 << 6; const LLOGO = 0b010 << 9; const RLOGO = 0b001 << 9; } } impl ModifiersStateSide { pub fn filter_out_altgr(&self) -> ModifiersStateSide { match layout_uses_altgr() && self.contains(Self::RALT) { false => *self, true => *self & !(Self::LCTRL | Self::RCTRL | Self::LALT | Self::RALT), } } } impl From<ModifiersStateSide> for ModifiersState { fn from(side: ModifiersStateSide) -> Self { let mut state = ModifiersState::default(); state.set( Self::SHIFT, side.intersects(ModifiersStateSide::LSHIFT | ModifiersStateSide::RSHIFT), ); state.set( Self::CTRL, side.intersects(ModifiersStateSide::LCTRL | ModifiersStateSide::RCTRL), ); state.set( Self::ALT, side.intersects(ModifiersStateSide::LALT | ModifiersStateSide::RALT), ); state.set( Self::LOGO, side.intersects(ModifiersStateSide::LLOGO | ModifiersStateSide::RLOGO), ); state } } pub fn get_pressed_keys() -> impl Iterator<Item = c_int> { let mut keyboard_state = vec![0u8; 256]; unsafe { winuser::GetKeyboardState(keyboard_state.as_mut_ptr()) }; keyboard_state .into_iter() .enumerate() .filter(|(_, p)| (*p & (1 << 7)) != 0) // whether or not a key is pressed is communicated via the high-order bit .map(|(i, _)| i as c_int) } unsafe fn get_char(keyboard_state: &[u8; 256], v_key: u32, hkl: HKL) -> Option<char> { let mut unicode_bytes = [0u16; 5]; let len = winuser::ToUnicodeEx( v_key, 0, keyboard_state.as_ptr(), unicode_bytes.as_mut_ptr(), unicode_bytes.len() as _, 0, hkl, ); if len >= 1 { char::decode_utf16(unicode_bytes.iter().cloned()) .next() .and_then(|c| c.ok()) } else { None } } /// Figures out if the keyboard layout has an AltGr key instead of an Alt key. /// /// Unfortunately, the Windows API doesn't give a way for us to conveniently figure that out. So, /// we use a technique blatantly stolen from [the Firefox source code][source]: iterate over every /// possible virtual key and compare the `char` output when AltGr is pressed vs when it isn't. If /// pressing AltGr outputs characters that are different from the standard characters, the layout /// uses AltGr. Otherwise, it doesn't. 
/// /// [source]: https://github.com/mozilla/gecko-dev/blob/265e6721798a455604328ed5262f430cfcc37c2f/widget/windows/KeyboardLayout.cpp#L4356-L4416 fn layout_uses_altgr() -> bool { unsafe { static ACTIVE_LAYOUT: AtomicPtr<HKL__> = AtomicPtr::new(ptr::null_mut()); static USES_ALTGR: AtomicBool = AtomicBool::new(false); let hkl = winuser::GetKeyboardLayout(0); let old_hkl = ACTIVE_LAYOUT.swap(hkl, Ordering::SeqCst); if hkl == old_hkl { return USES_ALTGR.load(Ordering::SeqCst); } let mut keyboard_state_altgr = [0u8; 256]; // AltGr is an alias for Ctrl+Alt for... some reason. Whatever it is, those are the keypresses // we have to emulate to do an AltGr test. keyboard_state_altgr[winuser::VK_MENU as usize] = 0x80; keyboard_state_altgr[winuser::VK_CONTROL as usize] = 0x80; let keyboard_state_empty = [0u8; 256]; for v_key in 0..=255 { let key_noaltgr = get_char(&keyboard_state_empty, v_key, hkl); let key_altgr = get_char(&keyboard_state_altgr, v_key, hkl); if let (Some(noaltgr), Some(altgr)) = (key_noaltgr, key_altgr) { if noaltgr != altgr { USES_ALTGR.store(true, Ordering::SeqCst); return true; } } } USES_ALTGR.store(false, Ordering::SeqCst); false } } pub fn vkey_to_winit_vkey(vkey: c_int) -> Option<VirtualKeyCode> { // VK_* codes are documented here https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx match vkey { //winuser::VK_LBUTTON => Some(VirtualKeyCode::Lbutton), //winuser::VK_RBUTTON => Some(VirtualKeyCode::Rbutton), //winuser::VK_CANCEL => Some(VirtualKeyCode::Cancel), //winuser::VK_MBUTTON => Some(VirtualKeyCode::Mbutton), //winuser::VK_XBUTTON1 => Some(VirtualKeyCode::Xbutton1), //winuser::VK_XBUTTON2 => Some(VirtualKeyCode::Xbutton2), winuser::VK_BACK => Some(VirtualKeyCode::Back), winuser::VK_TAB => Some(VirtualKeyCode::Tab), //winuser::VK_CLEAR => Some(VirtualKeyCode::Clear), winuser::VK_RETURN => Some(VirtualKeyCode::Return), winuser::VK_LSHIFT => Some(VirtualKeyCode::LShift), winuser::VK_RSHIFT => Some(VirtualKeyCode::RShift), winuser::VK_LCONTROL => Some(VirtualKeyCode::LControl), winuser::VK_RCONTROL => Some(VirtualKeyCode::RControl), winuser::VK_LMENU => Some(VirtualKeyCode::LAlt), winuser::VK_RMENU => Some(VirtualKeyCode::RAlt), winuser::VK_PAUSE => Some(VirtualKeyCode::Pause), winuser::VK_CAPITAL => Some(VirtualKeyCode::Capital), winuser::VK_KANA => Some(VirtualKeyCode::Kana), //winuser::VK_HANGUEL => Some(VirtualKeyCode::Hanguel), //winuser::VK_HANGUL => Some(VirtualKeyCode::Hangul), //winuser::VK_JUNJA => Some(VirtualKeyCode::Junja), //winuser::VK_FINAL => Some(VirtualKeyCode::Final), //winuser::VK_HANJA => Some(VirtualKeyCode::Hanja), winuser::VK_KANJI => Some(VirtualKeyCode::Kanji), winuser::VK_ESCAPE => Some(VirtualKeyCode::Escape), winuser::VK_CONVERT => Some(VirtualKeyCode::Convert), winuser::VK_NONCONVERT => Some(VirtualKeyCode::NoConvert), //winuser::VK_ACCEPT => Some(VirtualKeyCode::Accept), //winuser::VK_MODECHANGE => Some(VirtualKeyCode::Modechange), winuser::VK_SPACE => Some(VirtualKeyCode::Space), winuser::VK_PRIOR => Some(VirtualKeyCode::PageUp), winuser::VK_NEXT => Some(VirtualKeyCode::PageDown), winuser::VK_END => Some(VirtualKeyCode::End), winuser::VK_HOME => Some(VirtualKeyCode::Home), winuser::VK_LEFT => Some(VirtualKeyCode::Left), winuser::VK_UP => Some(VirtualKeyCode::Up), winuser::VK_RIGHT => Some(VirtualKeyCode::Right), winuser::VK_DOWN => Some(VirtualKeyCode::Down), //winuser::VK_SELECT => Some(VirtualKeyCode::Select), //winuser::VK_PRINT => Some(VirtualKeyCode::Print), //winuser::VK_EXECUTE => 
Some(VirtualKeyCode::Execute), winuser::VK_SNAPSHOT => Some(VirtualKeyCode::Snapshot), winuser::VK_INSERT => Some(VirtualKeyCode::Insert), winuser::VK_DELETE => Some(VirtualKeyCode::Delete), //winuser::VK_HELP => Some(VirtualKeyCode::Help), 0x30 => Some(VirtualKeyCode::Key0), 0x31 => Some(VirtualKeyCode::Key1), 0x32 => Some(VirtualKeyCode::Key2), 0x33 => Some(VirtualKeyCode::Key3), 0x34 => Some(VirtualKeyCode::Key4), 0x35 => Some(VirtualKeyCode::Key5), 0x36 => Some(VirtualKeyCode::Key6), 0x37 => Some(VirtualKeyCode::Key7), 0x38 => Some(VirtualKeyCode::Key8), 0x39 => Some(VirtualKeyCode::Key9), 0x41 => Some(VirtualKeyCode::A), 0x42 => Some(VirtualKeyCode::B), 0x43 => Some(VirtualKeyCode::C), 0x44 => Some(VirtualKeyCode::D), 0x45 => Some(VirtualKeyCode::E), 0x46 => Some(VirtualKeyCode::F), 0x47 => Some(VirtualKeyCode::G), 0x48 => Some(VirtualKeyCode::H), 0x49 => Some(VirtualKeyCode::I), 0x4A => Some(VirtualKeyCode::J), 0x4B => Some(VirtualKeyCode::K), 0x4C => Some(VirtualKeyCode::L), 0x4D => Some(VirtualKeyCode::M), 0x4E => Some(VirtualKeyCode::N), 0x4F => Some(VirtualKeyCode::O), 0x50 => Some(VirtualKeyCode::P), 0x51 => Some(VirtualKeyCode::Q), 0x52 => Some(VirtualKeyCode::R), 0x53 => Some(VirtualKeyCode::S), 0x54 => Some(VirtualKeyCode::T), 0x55 => Some(VirtualKeyCode::U), 0x56 => Some(VirtualKeyCode::V), 0x57 => Some(VirtualKeyCode::W), 0x58 => Some(VirtualKeyCode::X), 0x59 => Some(VirtualKeyCode::Y), 0x5A => Some(VirtualKeyCode::Z), winuser::VK_LWIN => Some(VirtualKeyCode::LWin), winuser::VK_RWIN => Some(VirtualKeyCode::RWin), winuser::VK_APPS => Some(VirtualKeyCode::Apps), winuser::VK_SLEEP => Some(VirtualKeyCode::Sleep), winuser::VK_NUMPAD0 => Some(VirtualKeyCode::Numpad0), winuser::VK_NUMPAD1 => Some(VirtualKeyCode::Numpad1), winuser::VK_NUMPAD2 => Some(VirtualKeyCode::Numpad2), winuser::VK_NUMPAD3 => Some(VirtualKeyCode::Numpad3), winuser::VK_NUMPAD4 => Some(VirtualKeyCode::Numpad4), winuser::VK_NUMPAD5 => Some(VirtualKeyCode::Numpad5), winuser::VK_NUMPAD6 => Some(VirtualKeyCode::Numpad6), winuser::VK_NUMPAD7 => Some(VirtualKeyCode::Numpad7), winuser::VK_NUMPAD8 => Some(VirtualKeyCode::Numpad8), winuser::VK_NUMPAD9 => Some(VirtualKeyCode::Numpad9), winuser::VK_MULTIPLY => Some(VirtualKeyCode::NumpadMultiply), winuser::VK_ADD => Some(VirtualKeyCode::NumpadAdd), //winuser::VK_SEPARATOR => Some(VirtualKeyCode::Separator), winuser::VK_SUBTRACT => Some(VirtualKeyCode::NumpadSubtract), winuser::VK_DECIMAL => Some(VirtualKeyCode::NumpadDecimal), winuser::VK_DIVIDE => Some(VirtualKeyCode::NumpadDivide), winuser::VK_F1 => Some(VirtualKeyCode::F1), winuser::VK_F2 => Some(VirtualKeyCode::F2), winuser::VK_F3 => Some(VirtualKeyCode::F3), winuser::VK_F4 => Some(VirtualKeyCode::F4), winuser::VK_F5 => Some(VirtualKeyCode::F5), winuser::VK_F6 => Some(VirtualKeyCode::F6), winuser::VK_F7 => Some(VirtualKeyCode::F7), winuser::VK_F8 => Some(VirtualKeyCode::F8), winuser::VK_F9 => Some(VirtualKeyCode::F9), winuser::VK_F10 => Some(VirtualKeyCode::F10), winuser::VK_F11 => Some(VirtualKeyCode::F11), winuser::VK_F12 => Some(VirtualKeyCode::F12), winuser::VK_F13 => Some(VirtualKeyCode::F13), winuser::VK_F14 => Some(VirtualKeyCode::F14), winuser::VK_F15 => Some(VirtualKeyCode::F15), winuser::VK_F16 => Some(VirtualKeyCode::F16), winuser::VK_F17 => Some(VirtualKeyCode::F17), winuser::VK_F18 => Some(VirtualKeyCode::F18), winuser::VK_F19 => Some(VirtualKeyCode::F19), winuser::VK_F20 => Some(VirtualKeyCode::F20), winuser::VK_F21 => Some(VirtualKeyCode::F21), winuser::VK_F22 => Some(VirtualKeyCode::F22), 
winuser::VK_F23 => Some(VirtualKeyCode::F23), winuser::VK_F24 => Some(VirtualKeyCode::F24), winuser::VK_NUMLOCK => Some(VirtualKeyCode::Numlock), winuser::VK_SCROLL => Some(VirtualKeyCode::Scroll), winuser::VK_BROWSER_BACK => Some(VirtualKeyCode::NavigateBackward), winuser::VK_BROWSER_FORWARD => Some(VirtualKeyCode::NavigateForward), winuser::VK_BROWSER_REFRESH => Some(VirtualKeyCode::WebRefresh), winuser::VK_BROWSER_STOP => Some(VirtualKeyCode::WebStop), winuser::VK_BROWSER_SEARCH => Some(VirtualKeyCode::WebSearch), winuser::VK_BROWSER_FAVORITES => Some(VirtualKeyCode::WebFavorites), winuser::VK_BROWSER_HOME => Some(VirtualKeyCode::WebHome), winuser::VK_VOLUME_MUTE => Some(VirtualKeyCode::Mute), winuser::VK_VOLUME_DOWN => Some(VirtualKeyCode::VolumeDown), winuser::VK_VOLUME_UP => Some(VirtualKeyCode::VolumeUp), winuser::VK_MEDIA_NEXT_TRACK => Some(VirtualKeyCode::NextTrack), winuser::VK_MEDIA_PREV_TRACK => Some(VirtualKeyCode::PrevTrack), winuser::VK_MEDIA_STOP => Some(VirtualKeyCode::MediaStop), winuser::VK_MEDIA_PLAY_PAUSE => Some(VirtualKeyCode::PlayPause), winuser::VK_LAUNCH_MAIL => Some(VirtualKeyCode::Mail), winuser::VK_LAUNCH_MEDIA_SELECT => Some(VirtualKeyCode::MediaSelect), /*winuser::VK_LAUNCH_APP1 => Some(VirtualKeyCode::Launch_app1), winuser::VK_LAUNCH_APP2 => Some(VirtualKeyCode::Launch_app2),*/ winuser::VK_OEM_PLUS => Some(VirtualKeyCode::Equals), winuser::VK_OEM_COMMA => Some(VirtualKeyCode::Comma), winuser::VK_OEM_MINUS => Some(VirtualKeyCode::Minus), winuser::VK_OEM_PERIOD => Some(VirtualKeyCode::Period), winuser::VK_OEM_1 => map_text_keys(vkey), winuser::VK_OEM_2 => map_text_keys(vkey), winuser::VK_OEM_3 => map_text_keys(vkey), winuser::VK_OEM_4 => map_text_keys(vkey), winuser::VK_OEM_5 => map_text_keys(vkey), winuser::VK_OEM_6 => map_text_keys(vkey), winuser::VK_OEM_7 => map_text_keys(vkey), /* winuser::VK_OEM_8 => Some(VirtualKeyCode::Oem_8), */ winuser::VK_OEM_102 => Some(VirtualKeyCode::OEM102), /*winuser::VK_PROCESSKEY => Some(VirtualKeyCode::Processkey), winuser::VK_PACKET => Some(VirtualKeyCode::Packet), winuser::VK_ATTN => Some(VirtualKeyCode::Attn), winuser::VK_CRSEL => Some(VirtualKeyCode::Crsel), winuser::VK_EXSEL => Some(VirtualKeyCode::Exsel), winuser::VK_EREOF => Some(VirtualKeyCode::Ereof), winuser::VK_PLAY => Some(VirtualKeyCode::Play), winuser::VK_ZOOM => Some(VirtualKeyCode::Zoom), winuser::VK_NONAME => Some(VirtualKeyCode::Noname), winuser::VK_PA1 => Some(VirtualKeyCode::Pa1), winuser::VK_OEM_CLEAR => Some(VirtualKeyCode::Oem_clear),*/ _ => None, } } pub fn handle_extended_keys( vkey: c_int, mut scancode: UINT, extended: bool, ) -> Option<(c_int, UINT)> { // Welcome to hell https://blog.molecular-matters.com/2011/09/05/properly-handling-keyboard-input/ let vkey = match vkey { winuser::VK_SHIFT => unsafe { winuser::MapVirtualKeyA(scancode, winuser::MAPVK_VSC_TO_VK_EX) as _ }, winuser::VK_CONTROL => { if extended { winuser::VK_RCONTROL } else { winuser::VK_LCONTROL } } winuser::VK_MENU => { if extended { winuser::VK_RMENU } else { winuser::VK_LMENU } } _ => { match scancode { // This is only triggered when using raw input. Without this check, we get two events whenever VK_PAUSE is // pressed, the first one having scancode 0x1D but vkey VK_PAUSE... 0x1D if vkey == winuser::VK_PAUSE => return None, // ...and the second having scancode 0x45 but an unmatched vkey! 0x45 => winuser::VK_PAUSE, // VK_PAUSE and VK_SCROLL have the same scancode when using modifiers, alongside incorrect vkey values. 
0x46 => { if extended { scancode = 0x45; winuser::VK_PAUSE } else { winuser::VK_SCROLL } } _ => vkey, } } }; Some((vkey, scancode)) } pub fn process_key_params( wparam: WPARAM, lparam: LPARAM, ) -> Option<(ScanCode, Option<VirtualKeyCode>)> { let scancode = ((lparam >> 16) & 0xff) as UINT; let extended = (lparam & 0x01000000) != 0; handle_extended_keys(wparam as _, scancode, extended) .map(|(vkey, scancode)| (scancode, vkey_to_winit_vkey(vkey))) } // This is needed as windows doesn't properly distinguish // some virtual key codes for different keyboard layouts fn map_text_keys(win_virtual_key: i32) -> Option<VirtualKeyCode> { let char_key = unsafe { winuser::MapVirtualKeyA(win_virtual_key as u32, winuser::MAPVK_VK_TO_CHAR) } & 0x7FFF; match char::from_u32(char_key) { Some(';') => Some(VirtualKeyCode::Semicolon), Some('/') => Some(VirtualKeyCode::Slash), Some('`') => Some(VirtualKeyCode::Grave), Some('[') => Some(VirtualKeyCode::LBracket), Some(']') => Some(VirtualKeyCode::RBracket), Some('\'') => Some(VirtualKeyCode::Apostrophe), Some('\\') => Some(VirtualKeyCode::Backslash), _ => None, } }
42.794686
142
0.619687
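A sketch of feeding a WM_KEYDOWN message through the helpers above; the surrounding window procedure and the `wparam`/`lparam` values are assumed to come from Windows:

// Hypothetical WM_KEYDOWN arm of a window procedure:
if let Some((scancode, vkey)) = process_key_params(wparam, lparam) {
    // `vkey` is None for virtual keys winit does not map.
    println!(
        "scancode {:#x}, key {:?}, modifiers {:?}",
        scancode,
        vkey,
        get_key_mods(),
    );
}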
ddc27d868c30068cbe31a867358dcfd7f64b264b
5,599
// Copyright 2021 TiKV Project Authors. Licensed under Apache-2.0. // #[PerformanceCriticalPath] use crate::storage::kv::{Iterator, Result, Snapshot, RAW_VALUE_TOMBSTONE}; use crate::storage::Statistics; use api_version::KvFormat; use engine_traits::raw_ttl::ttl_current_ts; use engine_traits::CfName; use engine_traits::{IterOptions, ReadOptions}; use std::marker::PhantomData; use txn_types::{Key, Value}; #[derive(Clone)] pub struct RawEncodeSnapshot<S: Snapshot, F: KvFormat> { snap: S, current_ts: u64, _phantom: PhantomData<F>, } impl<S: Snapshot, F: KvFormat> RawEncodeSnapshot<S, F> { pub fn from_snapshot(snap: S) -> Self { RawEncodeSnapshot { snap, current_ts: ttl_current_ts(), _phantom: PhantomData, } } fn map_value(&self, value: Result<Option<Value>>) -> Result<Option<Value>> { if let Some(v) = value? { let raw_value = F::decode_raw_value_owned(v)?; if raw_value.is_valid(self.current_ts) { return Ok(Some(raw_value.user_value)); } } Ok(None) } pub fn get_key_ttl_cf( &self, cf: CfName, key: &Key, stats: &mut Statistics, ) -> Result<Option<u64>> { stats.data.flow_stats.read_keys = 1; stats.data.flow_stats.read_bytes = key.as_encoded().len(); if let Some(v) = self.snap.get_cf(cf, key)? { stats.data.flow_stats.read_bytes += v.len(); let raw_value = F::decode_raw_value_owned(v)?; return match raw_value.expire_ts { Some(expire_ts) if expire_ts <= self.current_ts => Ok(None), Some(expire_ts) => Ok(Some(expire_ts - self.current_ts)), None => Ok(Some(0)), }; } Ok(None) } } impl<S: Snapshot, F: KvFormat> Snapshot for RawEncodeSnapshot<S, F> { type Iter = RawEncodeIterator<S::Iter, F>; type Ext<'a> where S: 'a, = S::Ext<'a>; fn get(&self, key: &Key) -> Result<Option<Value>> { self.map_value(self.snap.get(key)) } fn get_cf(&self, cf: CfName, key: &Key) -> Result<Option<Value>> { self.map_value(self.snap.get_cf(cf, key)) } fn get_cf_opt(&self, opts: ReadOptions, cf: CfName, key: &Key) -> Result<Option<Value>> { self.map_value(self.snap.get_cf_opt(opts, cf, key)) } fn iter(&self, iter_opt: IterOptions) -> Result<Self::Iter> { Ok(RawEncodeIterator::new( self.snap.iter(iter_opt)?, self.current_ts, )) } fn iter_cf(&self, cf: CfName, iter_opt: IterOptions) -> Result<Self::Iter> { Ok(RawEncodeIterator::new( self.snap.iter_cf(cf, iter_opt)?, self.current_ts, )) } #[inline] fn lower_bound(&self) -> Option<&[u8]> { self.snap.lower_bound() } #[inline] fn upper_bound(&self) -> Option<&[u8]> { self.snap.upper_bound() } fn ext(&self) -> S::Ext<'_> { self.snap.ext() } } pub struct RawEncodeIterator<I: Iterator, F: KvFormat> { inner: I, current_ts: u64, skip_invalid: usize, _phantom: PhantomData<F>, } impl<I: Iterator, F: KvFormat> RawEncodeIterator<I, F> { fn new(inner: I, current_ts: u64) -> Self { RawEncodeIterator { inner, current_ts, skip_invalid: 0, _phantom: PhantomData, } } fn find_valid_value(&mut self, mut res: Result<bool>, forward: bool) -> Result<bool> { loop { if res.is_err() { break; } if *res.as_ref().unwrap() { let raw_value = F::decode_raw_value(self.inner.value())?; if !raw_value.is_valid(self.current_ts) { self.skip_invalid += 1; res = if forward { self.inner.next() } else { self.inner.prev() }; continue; } } break; } res } } impl<I: Iterator, F: KvFormat> Drop for RawEncodeIterator<I, F> { fn drop(&mut self) { RAW_VALUE_TOMBSTONE.with(|m| { *m.borrow_mut() += self.skip_invalid; }); } } impl<I: Iterator, F: KvFormat> Iterator for RawEncodeIterator<I, F> { fn next(&mut self) -> Result<bool> { let res = self.inner.next(); self.find_valid_value(res, true) } fn prev(&mut self) -> Result<bool> { let 
res = self.inner.prev(); self.find_valid_value(res, false) } fn seek(&mut self, key: &Key) -> Result<bool> { let res = self.inner.seek(key); self.find_valid_value(res, true) } fn seek_for_prev(&mut self, key: &Key) -> Result<bool> { let res = self.inner.seek_for_prev(key); self.find_valid_value(res, false) } fn seek_to_first(&mut self) -> Result<bool> { let res = self.inner.seek_to_first(); self.find_valid_value(res, true) } fn seek_to_last(&mut self) -> Result<bool> { let res = self.inner.seek_to_last(); self.find_valid_value(res, false) } fn valid(&self) -> Result<bool> { self.inner.valid() } fn validate_key(&self, key: &Key) -> Result<()> { self.inner.validate_key(key) } fn key(&self) -> &[u8] { self.inner.key() } fn value(&self) -> &[u8] { F::decode_raw_value(self.inner.value()).unwrap().user_value } }
27.312195
93
0.549741
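A sketch of the TTL-filtering wrapper above in use, assuming `snap` is some concrete `Snapshot` implementation and `ApiV2` is the `KvFormat` in play (both names are assumptions for illustration):

// Wrap the raw snapshot so expired (TTL'd) raw values read back as None.
let enc: RawEncodeSnapshot<_, ApiV2> = RawEncodeSnapshot::from_snapshot(snap);
let key = Key::from_encoded(b"r_some_key".to_vec()); // hypothetical raw key
match enc.get(&key).expect("snapshot read") {
    Some(user_value) => println!("live value: {:?}", user_value),
    None => println!("missing or expired"),
}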
fc9f8d78b7c390ed662a3f4c6041542549e4059b
2,218
use rsdict::RsDict; use succinct::rank::RankSupport; use succinct::select::SelectSupport; fn main() { loop { afl::fuzz!(|data: &[u8]| { let mut bits = Vec::with_capacity(data.len() * 8); for byte in data { for i in 0..8 { bits.push(byte & (1 << i) != 0); } } let mut blocks = Vec::with_capacity(bits.len() / 64); for chunk in bits.chunks_exact(64) { let mut block = 0; for (i, &bit) in chunk.iter().enumerate() { if bit { block |= 1 << i; } } blocks.push(block); } let mut from_bits = RsDict::new(); for &bit in &bits { from_bits.push(bit); } let mut from_blocks = RsDict::from_blocks(blocks.into_iter()); for &bit in &bits[(bits.len() / 64 * 64)..] { from_blocks.push(bit); } let mut one_rank = 0; let mut zero_rank = 0; for (i, &bit) in bits.iter().enumerate() { for r in &[&from_bits, &from_blocks] { assert_eq!(r.get_bit(i as u64), bit); assert_eq!(r.rank(i as u64, false), zero_rank); assert_eq!(r.rank(i as u64, true), one_rank); if bit { assert_eq!(r.select(one_rank as u64, true), Some(i as u64)); } else { assert_eq!(r.select(zero_rank as u64, false), Some(i as u64)); } } if bit { one_rank += 1; } else { zero_rank += 1; } } for r in &[&from_bits, &from_blocks] { for rank in (one_rank + 1)..bits.len() as u64 { assert_eq!(r.select(rank, true), None); } for rank in (zero_rank + 1)..bits.len() as u64 { assert_eq!(r.select(rank, false), None); } } }); } }
32.144928
86
0.397205
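The rank/select invariants the fuzz target checks can be seen directly on a small vector; a sketch against the same RsDict API:

use rsdict::RsDict;
use succinct::rank::RankSupport;
use succinct::select::SelectSupport;

fn main() {
    let mut r = RsDict::new();
    for &bit in &[true, false, true, true] {
        r.push(bit);
    }
    assert_eq!(r.rank(3, true), 2); // ones strictly before index 3
    assert_eq!(r.select(2, true), Some(3)); // index of the one with rank 2
}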
14115ec66ef29aa58a5ad75d235638b11d51ca4c
8,886
#![recursion_limit = "128"] use anyhow::{bail, Context, Result}; use chiptool::{generate, svd2ir}; use clap::Parser; use log::*; use regex::Regex; use std::fs; use std::io::Read; use std::{fs::File, io::stdout}; use chiptool::ir::IR; #[derive(Parser)] #[clap(version = "1.0", author = "Dirbaio <[email protected]>")] struct Opts { #[clap(subcommand)] subcommand: Subcommand, } #[derive(Parser)] enum Subcommand { Generate(Generate), ExtractPeripheral(ExtractPeripheral), Transform(Transform), Fmt(Fmt), Check(Check), GenBlock(GenBlock), } /// Extract peripheral from SVD to YAML #[derive(Parser)] struct ExtractPeripheral { /// SVD file path #[clap(long)] svd: String, /// Peripheral from the SVD #[clap(long)] peripheral: String, /// Transforms file path #[clap(long)] transform: Option<String>, } /// Apply transform to YAML #[derive(Parser)] struct Transform { /// Input YAML path #[clap(short, long)] input: String, /// Output YAML path #[clap(short, long)] output: String, /// Transforms file path #[clap(short, long)] transform: String, } /// Generate a PAC directly from a SVD #[derive(Parser)] struct Generate { /// SVD file path #[clap(long)] svd: String, /// Transforms file path #[clap(long)] transform: Option<String>, } /// Reformat a YAML #[derive(Parser)] struct Fmt { /// Peripheral file path files: Vec<String>, /// Error if incorrectly formatted, instead of fixing. #[clap(long)] check: bool, } /// Check a YAML for errors. #[derive(Parser)] struct Check { /// Peripheral file path files: Vec<String>, } /// Generate Rust code from a YAML register block #[derive(Parser)] struct GenBlock { /// Input YAML path #[clap(short, long)] input: String, /// Output YAML path #[clap(short, long)] output: String, } fn main() -> Result<()> { env_logger::init(); let opts: Opts = Opts::parse(); match opts.subcommand { Subcommand::ExtractPeripheral(x) => extract_peripheral(x), Subcommand::Generate(x) => gen(x), Subcommand::Transform(x) => transform(x), Subcommand::Fmt(x) => fmt(x), Subcommand::Check(x) => check(x), Subcommand::GenBlock(x) => gen_block(x), } } fn load_svd(path: &str) -> Result<svd_parser::Device> { let xml = &mut String::new(); File::open(path) .context("Cannot open the SVD file")? .read_to_string(xml) .context("Cannot read the SVD file")?; let device = svd_parser::parse(xml)?; Ok(device) } fn load_config(path: &str) -> Result<Config> { let config = fs::read(path).context("Cannot read the config file")?; serde_yaml::from_slice(&config).context("cannot deserialize config") } fn extract_peripheral(args: ExtractPeripheral) -> Result<()> { let config = match args.transform { Some(s) => load_config(&s)?, None => Config::default(), }; let svd = load_svd(&args.svd)?; let mut ir = IR::new(); let peri = args.peripheral; let mut p = svd .peripherals .iter() .find(|p| p.name == peri) .expect("peripheral not found"); if let Some(f) = &p.derived_from { p = svd .peripherals .iter() .find(|p| p.name == *f) .expect("derivedFrom peripheral not found"); } chiptool::svd2ir::convert_peripheral(&mut ir, p)?; // Fix weird newline spam in descriptions. let re = Regex::new("[ \n]+").unwrap(); chiptool::transform::map_descriptions(&mut ir, |d| re.replace_all(d, " ").into_owned())?; for t in &config.transforms { info!("running: {:?}", t); t.run(&mut ir)?; } // Ensure consistent sort order in the YAML. 
chiptool::transform::sort::Sort {}.run(&mut ir).unwrap(); serde_yaml::to_writer(stdout(), &ir).unwrap(); Ok(()) } fn gen(args: Generate) -> Result<()> { let config = match args.transform { Some(s) => load_config(&s)?, None => Config::default(), }; let svd = load_svd(&args.svd)?; let mut ir = svd2ir::convert_svd(&svd)?; // Fix weird newline spam in descriptions. let re = Regex::new("[ \n]+").unwrap(); chiptool::transform::map_descriptions(&mut ir, |d| re.replace_all(d, " ").into_owned())?; for t in &config.transforms { info!("running: {:?}", t); t.run(&mut ir)?; } let generate_opts = generate::Options { common_module: generate::CommonModule::Builtin, }; let items = generate::render(&ir, &generate_opts).unwrap(); fs::write("lib.rs", items.to_string())?; Ok(()) } fn transform(args: Transform) -> Result<()> { let data = fs::read(&args.input)?; let mut ir: IR = serde_yaml::from_slice(&data)?; let config = load_config(&args.transform)?; for t in &config.transforms { info!("running: {:?}", t); t.run(&mut ir)?; } let data = serde_yaml::to_vec(&ir)?; fs::write(&args.output, data)?; Ok(()) } fn fmt(args: Fmt) -> Result<()> { for file in args.files { let got_data = fs::read(&file)?; let mut ir: IR = serde_yaml::from_slice(&got_data)?; // Ensure consistent sort order in the YAML. chiptool::transform::sort::Sort {}.run(&mut ir).unwrap(); let want_data = serde_yaml::to_vec(&ir)?; if got_data != want_data { if args.check { bail!("File {} is not correctly formatted", &file); } else { fs::write(&file, want_data)?; } } } Ok(()) } fn check(args: Check) -> Result<()> { for file in args.files { let got_data = fs::read(&file)?; let ir: IR = serde_yaml::from_slice(&got_data)?; let mut printed = false; let mut error = move |s: String| { if !printed { printed = true; println!("{}:", &file); } println!(" {}", s); }; for (name, b) in &ir.blocks { for (i1, i2) in Pairs::new(b.items.iter()) { if i1.byte_offset == i2.byte_offset { error(format!( "block {}: registers overlap: {} {}", name, i1.name, i2.name )); } } } for (name, e) in &ir.enums { for (i1, i2) in Pairs::new(e.variants.iter()) { if i1.value == i2.value { error(format!( "enum {}: variants with same value: {} {}", name, i1.name, i2.name )); } } } for (name, f) in &ir.fieldsets { for (i1, i2) in Pairs::new(f.fields.iter()) { if i2.bit_offset + i2.bit_size > i1.bit_offset && i1.bit_offset + i1.bit_size > i2.bit_offset { error(format!( "fieldset {}: fields overlap: {} {}", name, i1.name, i2.name )); } } } } Ok(()) } fn gen_block(args: GenBlock) -> Result<()> { let data = fs::read(&args.input)?; let mut ir: IR = serde_yaml::from_slice(&data)?; chiptool::transform::Sanitize {}.run(&mut ir).unwrap(); // Ensure consistent sort order in the YAML. 
chiptool::transform::sort::Sort {}.run(&mut ir).unwrap(); let generate_opts = generate::Options { common_module: generate::CommonModule::Builtin, }; let items = generate::render(&ir, &generate_opts).unwrap(); fs::write(&args.output, items.to_string())?; Ok(()) } #[derive(serde::Serialize, serde::Deserialize)] struct Config { transforms: Vec<chiptool::transform::Transform>, } impl Default for Config { fn default() -> Self { Self { transforms: vec![] } } } // ============== struct Pairs<U: Iterator + Clone> { head: Option<U::Item>, tail: U, next: U, } impl<U: Iterator + Clone> Pairs<U> { fn new(mut iter: U) -> Self { let head = iter.next(); Pairs { head, tail: iter.clone(), next: iter, } } } impl<U: Iterator + Clone> Iterator for Pairs<U> where U::Item: Clone, { type Item = (U::Item, U::Item); fn next(&mut self) -> Option<Self::Item> { let a = self.head.as_ref()?.clone(); if let Some(b) = self.tail.next() { return Some((a, b)); } match self.next.next() { Some(new_head) => { self.head = Some(new_head); self.tail = self.next.clone(); self.next() } None => None, } } }
25.030986
93
0.532861
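The `Pairs` helper at the end of the file above drives the overlap checks by yielding each unordered pair of items exactly once; a small sketch of that behavior (this assumes `Pairs` is in scope; in the file it is private to the binary):

fn main() {
    // Pairs over [1, 2, 3] yields (&1, &2), (&1, &3), (&2, &3): each unordered pair once.
    let items = [1, 2, 3];
    let all: Vec<_> = Pairs::new(items.iter()).collect();
    assert_eq!(all.len(), 3); // n * (n - 1) / 2 pairs for n = 3
}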
1c0426808c705c048950b7ceac968425ed4199a8
297
// edition:2018 // The local `use` suggestion should start with `crate::` (but the // standard-library suggestions should not, obviously). mod plumbing { pub struct Drain; } fn main() { let _d = Drain {}; //~^ ERROR cannot find struct, variant or union type `Drain` in this scope }
21.214286
78
0.666667
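The import the diagnostic should suggest for the test above is `crate::`-prefixed; a sketch of the compiling version:

mod plumbing {
    pub struct Drain;
}

use crate::plumbing::Drain;

fn main() {
    let _d = Drain {}; // now resolves
}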
23dc6892ccc5ee78b503cdd96760aa9994ffaa57
213,525
/* * Copyright 2018 Bitwise IO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ----------------------------------------------------------------------------- */ //! The core PBFT algorithm use std::collections::HashSet; use std::convert::From; use itertools::Itertools; use protobuf::{Message, RepeatedField}; use sawtooth_sdk::consensus::engine::{Block, BlockId, PeerId, PeerInfo}; use sawtooth_sdk::consensus::service::Service; use sawtooth_sdk::messages::consensus::ConsensusPeerMessageHeader; use sawtooth_sdk::signing::{create_context, secp256k1::Secp256k1PublicKey}; use crate::config::{get_members_from_settings, PbftConfig}; use crate::error::PbftError; use crate::hash::verify_sha512; use crate::message_log::PbftLog; use crate::message_type::{ParsedMessage, PbftMessageType}; use crate::protos::pbft_message::{ PbftMessage, PbftMessageInfo, PbftNewView, PbftSeal, PbftSignedVote, }; use crate::state::{PbftMode, PbftPhase, PbftState}; use crate::timing::{retry_until_ok, Timeout}; /// Contains the core logic of the PBFT node pub struct PbftNode { /// Used for interactions with the validator pub service: Box<dyn Service>, /// Log of messages this node has received and accepted pub msg_log: PbftLog, } impl PbftNode { /// Construct a new PBFT node /// /// If the node is the primary on start-up, it initializes a new block on the chain pub fn new( config: &PbftConfig, chain_head: Block, connected_peers: Vec<PeerInfo>, service: Box<dyn Service>, state: &mut PbftState, ) -> Self { let mut n = PbftNode { service, msg_log: PbftLog::new(config), }; // Add chain head to log and update state n.msg_log.add_validated_block(chain_head.clone()); state.chain_head = chain_head.block_id.clone(); // If starting up from a non-genesis block, the node may need to perform some special // actions if chain_head.block_num > 1 { // If starting up with a block that has a consensus seal, update the view to match if let Ok(seal) = protobuf::parse_from_bytes::<PbftSeal>(&chain_head.payload) { state.view = seal.get_info().get_view(); info!("Updated view to {} on startup", state.view); } // If connected to any peers already, send bootstrap commit messages to them for peer in connected_peers { n.broadcast_bootstrap_commit(peer.peer_id, state) .unwrap_or_else(|err| { error!("Failed to broadcast bootstrap commit due to error: {}", err) }); } } // Primary initializes a block if state.is_primary() { n.service.initialize_block(None).unwrap_or_else(|err| { error!("Couldn't initialize block on startup due to error: {}", err) }); } n } // ---------- Methods for handling Updates from the Validator ---------- /// Handle a peer message from another PbftNode /// /// Handle all messages from other nodes. Such messages include `PrePrepare`, `Prepare`, /// `Commit`, `ViewChange`, and `NewView`. Make sure the message is from a PBFT member. If the /// node is view changing, ignore all messages that aren't `ViewChange`s or `NewView`s. 
pub fn on_peer_message( &mut self, msg: ParsedMessage, state: &mut PbftState, ) -> Result<(), PbftError> { // trace!("{}: Got peer message: {}", state, msg.info()); debug!("{}: Got peer message: {}", state, msg.info()); // Make sure this message is from a known member of the PBFT network if !state.member_ids.contains(&msg.info().signer_id) { return Err(PbftError::InvalidMessage(format!( "Received message from node ({:?}) that is not a member of the PBFT network", hex::encode(msg.info().get_signer_id()), ))); } let msg_type = PbftMessageType::from(msg.info().msg_type.as_str()); // If this node is in the process of a view change, ignore all messages except ViewChanges // and NewViews if match state.mode { PbftMode::ViewChanging(_) => true, _ => false, } && msg_type != PbftMessageType::ViewChange && msg_type != PbftMessageType::NewView { debug!( "{}: Node is view changing; ignoring {} message", state, msg_type ); return Ok(()); } match msg_type { PbftMessageType::PrePrepare => self.handle_pre_prepare(msg, state)?, PbftMessageType::Prepare => self.handle_prepare(msg, state)?, PbftMessageType::Commit => self.handle_commit(msg, state)?, PbftMessageType::ViewChange => self.handle_view_change(&msg, state)?, PbftMessageType::NewView => self.handle_new_view(&msg, state)?, PbftMessageType::SealRequest => self.handle_seal_request(msg, state)?, PbftMessageType::Seal => self.handle_seal_response(&msg, state)?, _ => warn!("Received message with unknown type: {:?}", msg_type), } Ok(()) } /// Handle a `PrePrepare` message /// /// A `PrePrepare` message is accepted and added to the log if the following are true: /// - The message signature is valid (already verified by validator) /// - The message is from the primary /// - The message's view matches the node's current view /// - A `PrePrepare` message does not already exist at this view and sequence number with a /// different block /// /// Once a `PrePrepare` for the current sequence number is accepted and added to the log, the /// node will try to switch to the `Preparing` phase. 
fn handle_pre_prepare( &mut self, msg: ParsedMessage, state: &mut PbftState, ) -> Result<(), PbftError> { // Check that the message is from the current primary if PeerId::from(msg.info().get_signer_id()) != state.get_primary_id() { warn!( "Got PrePrepare from a secondary node {:?}; ignoring message", msg.info().get_signer_id() ); return Ok(()); } // Check that the message is for the current view if msg.info().get_view() != state.view { return Err(PbftError::InvalidMessage(format!( "Node is on view {}, but a PrePrepare for view {} was received", state.view, msg.info().get_view(), ))); } // Check that no `PrePrepare`s already exist with this view and sequence number but a // different block; if this is violated, the primary is faulty so initiate a view change let mismatched_blocks = self .msg_log .get_messages_of_type_seq_view( PbftMessageType::PrePrepare, msg.info().get_seq_num(), msg.info().get_view(), ) .iter() .filter_map(|existing_msg| { let block_id = existing_msg.get_block_id(); if block_id != msg.get_block_id() { Some(block_id) } else { None } }) .collect::<Vec<_>>(); if !mismatched_blocks.is_empty() { self.start_view_change(state, state.view + 1)?; return Err(PbftError::FaultyPrimary(format!( "When checking PrePrepare with block {:?}, found PrePrepare(s) with same view and \ seq num but mismatched block(s): {:?}", hex::encode(&msg.get_block_id()), mismatched_blocks, ))); } // Add message to the log self.msg_log.add_message(msg.clone()); // If the node is in the PrePreparing phase, this message is for the current sequence // number, and the node already has this block: switch to Preparing self.try_preparing(msg.get_block_id(), state) } /// Handle a `Prepare` message /// /// Once a `Prepare` for the current sequence number is accepted and added to the log, the node /// will check if it has the required 2f + 1 `Prepared` messages to move on to the Committing /// phase fn handle_prepare( &mut self, msg: ParsedMessage, state: &mut PbftState, ) -> Result<(), PbftError> { let info = msg.info().clone(); let block_id = msg.get_block_id(); // Check that the message is for the current view if msg.info().get_view() != state.view { return Err(PbftError::InvalidMessage(format!( "Node is on view {}, but a Prepare for view {} was received", state.view, msg.info().get_view(), ))); } // The primary is not allowed to send a Prepare; its PrePrepare counts as its "vote" if PeerId::from(info.get_signer_id()) == state.get_primary_id() { self.start_view_change(state, state.view + 1)?; return Err(PbftError::FaultyPrimary(format!( "Received Prepare from primary at view {}, seq_num {}", state.view, state.seq_num ))); } self.msg_log.add_message(msg); // If this message is for the current sequence number and the node is in the Preparing // phase, check if the node is ready to move on to the Committing phase if info.get_seq_num() == state.seq_num && state.phase == PbftPhase::Preparing { // The node is ready to move on to the Committing phase (i.e. 
the predicate `prepared` // is true) when its log has 2f + 1 Prepare messages from different nodes that match // the PrePrepare message received earlier (same view, sequence number, and block) let has_matching_pre_prepare = self.msg_log .has_pre_prepare(info.get_seq_num(), info.get_view(), &block_id); let has_required_prepares = self .msg_log // Only get Prepares with matching seq_num, view, and block_id .get_messages_of_type_seq_view_block( PbftMessageType::Prepare, info.get_seq_num(), info.get_view(), &block_id, ) // Check if there are at least 2f + 1 Prepares .len() as u64 >= 2 * state.f; if has_matching_pre_prepare && has_required_prepares { state.switch_phase(PbftPhase::Committing)?; self.broadcast_pbft_message( state.view, state.seq_num, PbftMessageType::Commit, block_id, state, )?; } } Ok(()) } /// Handle a `Commit` message /// /// Once a `Commit` for the current sequence number is accepted and added to the log, the node /// will check if it has the required 2f + 1 `Commit` messages to actually commit the block fn handle_commit( &mut self, msg: ParsedMessage, state: &mut PbftState, ) -> Result<(), PbftError> { let info = msg.info().clone(); let block_id = msg.get_block_id(); // Check that the message is for the current view if msg.info().get_view() != state.view { return Err(PbftError::InvalidMessage(format!( "Node is on view {}, but a Commit for view {} was received", state.view, msg.info().get_view(), ))); } self.msg_log.add_message(msg); // If this message is for the current sequence number and the node is in the Committing // phase, check if the node is ready to commit the block if info.get_seq_num() == state.seq_num && state.phase == PbftPhase::Committing { // The node is ready to commit the block (i.e. the predicate `committable` is true) // when its log has 2f + 1 Commit messages from different nodes that match the // PrePrepare message received earlier (same view, sequence number, and block) let has_matching_pre_prepare = self.msg_log .has_pre_prepare(info.get_seq_num(), info.get_view(), &block_id); let has_required_commits = self .msg_log // Only get Commits with matching seq_num, view, and block_id .get_messages_of_type_seq_view_block( PbftMessageType::Commit, info.get_seq_num(), info.get_view(), &block_id, ) // Check if there are at least 2f + 1 Commits .len() as u64 > 2 * state.f; if has_matching_pre_prepare && has_required_commits { self.service.commit_block(block_id.clone()).map_err(|err| { PbftError::ServiceError( format!("Failed to commit block {:?}", hex::encode(&block_id)), err, ) })?; state.switch_phase(PbftPhase::Finishing(false))?; // Stop the commit timeout, since the network has agreed to commit the block state.commit_timeout.stop(); } } Ok(()) } /// Handle a `ViewChange` message /// /// When a `ViewChange` is received, check that it isn't outdated and add it to the log. If the /// node isn't already view changing but it now has f + 1 ViewChange messages, start view /// changing early. If the node is the primary and has 2f view change messages now, broadcast /// the NewView message to the rest of the nodes to move to the new view. 
fn handle_view_change( &mut self, msg: &ParsedMessage, state: &mut PbftState, ) -> Result<(), PbftError> { // Ignore old view change messages (already on a view >= the one this message is // for or already trying to change to a later view) let msg_view = msg.info().get_view(); if msg_view <= state.view || match state.mode { PbftMode::ViewChanging(v) => msg_view < v, _ => false, } { debug!("Ignoring stale view change message for view {}", msg_view); return Ok(()); } self.msg_log.add_message(msg.clone()); // Even if the node hasn't detected a faulty primary yet, start view changing if there are // f + 1 ViewChange messages in the log for this proposed view (but if already view // changing, only do this for a later view); this will prevent starting the view change too // late let is_later_view = match state.mode { PbftMode::ViewChanging(v) => msg_view > v, PbftMode::Normal => true, }; let start_view_change = self .msg_log // Only get ViewChanges with matching view .get_messages_of_type_view(PbftMessageType::ViewChange, msg_view) // Check if there are at least f + 1 ViewChanges .len() as u64 > state.f; if is_later_view && start_view_change { info!( "{}: Received f + 1 ViewChange messages; starting early view change", state ); // Can exit early since the node will self-send another ViewChange message here return self.start_view_change(state, msg_view); } let messages = self .msg_log .get_messages_of_type_view(PbftMessageType::ViewChange, msg_view); // If there are 2f + 1 ViewChange messages and the view change timeout is not already // started, update the timeout and start it if !state.view_change_timeout.is_active() && messages.len() as u64 > state.f * 2 { state.view_change_timeout = Timeout::new( state .view_change_duration .checked_mul((msg_view - state.view) as u32) .expect("View change timeout has overflowed"), ); state.view_change_timeout.start(); } // If this node is the new primary and the required 2f ViewChange messages (not including // the primary's own) are present in the log, broadcast the NewView message let messages_from_other_nodes = messages .iter() .filter(|msg| !msg.from_self) .cloned() .collect::<Vec<_>>(); if state.is_primary_at_view(msg_view) && messages_from_other_nodes.len() as u64 >= 2 * state.f { let mut new_view = PbftNewView::new(); new_view.set_info(PbftMessageInfo::new_from( PbftMessageType::NewView, msg_view, state.seq_num - 1, state.id.clone(), )); new_view.set_view_changes(Self::signed_votes_from_messages( messages_from_other_nodes.as_slice(), )); trace!("Created NewView message: {:?}", new_view); self.broadcast_message(ParsedMessage::from_new_view_message(new_view)?, state)?; } Ok(()) } /// Handle a `NewView` message /// /// When a `NewView` is received, verify that it is valid; if it is, update the view and the /// node's state. 
fn handle_new_view( &mut self, msg: &ParsedMessage, state: &mut PbftState, ) -> Result<(), PbftError> { let new_view = msg.get_new_view_message(); match self.verify_new_view(new_view, state) { Ok(_) => trace!("NewView passed verification"), Err(err) => { return Err(PbftError::InvalidMessage(format!( "NewView failed verification - Error was: {}", err ))); } } // If this node was the primary before, cancel any block that may have been initialized if state.is_primary() { self.service.cancel_block().unwrap_or_else(|err| { info!("Failed to cancel block when becoming secondary: {:?}", err); }); } // Update view state.view = new_view.get_info().get_view(); state.view_change_timeout.stop(); info!("{}: Updated to view {}", state, state.view); // Reset state to Normal mode, reset the phase (unless waiting for a BlockCommit) and // restart the idle timeout state.mode = PbftMode::Normal; if match state.phase { PbftPhase::Finishing(_) => false, _ => true, } { state.phase = PbftPhase::PrePreparing; } state.idle_timeout.start(); // Initialize a new block if this node is the new primary if state.is_primary() { self.service.initialize_block(None).map_err(|err| { PbftError::ServiceError("Couldn't initialize block after view change".into(), err) })?; } Ok(()) } /// Handle a `SealRequest` message /// /// A node is requesting a consensus seal for the last block. If the block was the last one /// committed by this node, build the seal and send it to the requesting node; if the block has /// not been committed yet but it's the next one to be committed, add the request to the log /// and the node will build/send the seal when it's done committing. If this is an older block /// (state.seq_num > msg.seq_num + 1) or this node is behind (state.seq_num < msg.seq_num), the /// node will not be able to build the requested seal, so just ignore the message. fn handle_seal_request( &mut self, msg: ParsedMessage, state: &mut PbftState, ) -> Result<(), PbftError> { if state.seq_num == msg.info().get_seq_num() + 1 { return self.send_seal_response(state, &msg.info().get_signer_id().to_vec()); } else if state.seq_num == msg.info().get_seq_num() { self.msg_log.add_message(msg); } Ok(()) } /// Handle a `Seal` message /// /// A node has responded to the seal request by sending a seal for the last block; validate the /// seal and commit the block. 
    fn handle_seal_response(
        &mut self,
        msg: &ParsedMessage,
        state: &mut PbftState,
    ) -> Result<(), PbftError> {
        let seal = msg.get_seal();

        // If the node has already committed the block, ignore
        if let PbftPhase::Finishing(_) = state.phase {
            return Ok(());
        }

        // Get the previous ID of the block this seal is for so it can be used to verify the seal
        let previous_id = self
            .msg_log
            .get_block_with_id(seal.block_id.as_slice())
            // Make sure the node actually has the block
            .ok_or_else(|| {
                PbftError::InvalidMessage(format!(
                    "Received a seal for a block ({:?}) that the node does not have",
                    hex::encode(&seal.block_id),
                ))
            })
            .and_then(|block| {
                // Make sure the block is at the node's current sequence number
                if block.block_num != state.seq_num {
                    Err(PbftError::InvalidMessage(format!(
                        "Received a seal for block {:?}, but block_num does not match node's \
                         seq_num: {} != {}",
                        hex::encode(&seal.block_id),
                        block.block_num,
                        state.seq_num,
                    )))
                } else {
                    Ok(block.previous_id.clone())
                }
            })?;

        // Verify the seal
        match self.verify_consensus_seal(seal, previous_id, state) {
            Ok(_) => {
                trace!("Consensus seal passed verification");
            }
            Err(err) => {
                return Err(PbftError::InvalidMessage(format!(
                    "Consensus seal failed verification - Error was: {}",
                    err
                )));
            }
        }

        // Catch up
        self.catchup(state, &seal, false)
    }

    /// Handle a `BlockNew` update from the Validator
    ///
    /// The validator has received a new block; check if it is a block that should be considered,
    /// add it to the log as an unvalidated block, and instruct the validator to validate it.
    pub fn on_block_new(&mut self, block: Block, state: &mut PbftState) -> Result<(), PbftError> {
        info!(
            "{}: Got BlockNew: {} / {}",
            state,
            block.block_num,
            hex::encode(&block.block_id)
        );
        trace!("Block details: {:?}", block);

        // Only future blocks should be considered since committed blocks are final
        if block.block_num < state.seq_num {
            self.service
                .fail_block(block.block_id.clone())
                .unwrap_or_else(|err| error!("Couldn't fail block due to error: {:?}", err));
            return Err(PbftError::InternalError(format!(
                "Received block {:?} / {:?} that is older than the current sequence number: {:?}",
                block.block_num,
                hex::encode(&block.block_id),
                state.seq_num,
            )));
        }

        // Make sure the node already has the previous block, since the consensus seal can't be
        // verified without it
        let previous_block = self
            .msg_log
            .get_block_with_id(block.previous_id.as_slice())
            .or_else(|| {
                self.msg_log
                    .get_unvalidated_block_with_id(block.previous_id.as_slice())
            });
        if previous_block.is_none() {
            self.service
                .fail_block(block.block_id.clone())
                .unwrap_or_else(|err| error!("Couldn't fail block due to error: {:?}", err));
            return Err(PbftError::InternalError(format!(
                "Received block {:?} / {:?} but node does not have previous block {:?}",
                block.block_num,
                hex::encode(&block.block_id),
                hex::encode(&block.previous_id),
            )));
        }

        // Make sure that the previous block has the previous block number (enforces that blocks
        // are strictly monotonically increasing by 1)
        let previous_block = previous_block.expect("Previous block's existence already checked");
        if previous_block.block_num != block.block_num - 1 {
            self.service
                .fail_block(block.block_id.clone())
                .unwrap_or_else(|err| error!("Couldn't fail block due to error: {:?}", err));
            return Err(PbftError::InternalError(format!(
                "Received block {:?} / {:?} but its previous block ({:?} / {:?}) does not have \
                 the previous block_num",
                block.block_num,
                hex::encode(&block.block_id),
                block.block_num - 1,
                hex::encode(&block.previous_id),
            )));
        }

        // Add the currently unvalidated block to the
log self.msg_log.add_unvalidated_block(block.clone()); // Have the validator check the block self.service .check_blocks(vec![block.block_id.clone()]) .map_err(|err| { PbftError::ServiceError( format!( "Failed to check block {:?} / {:?}", block.block_num, hex::encode(&block.block_id), ), err, ) })?; Ok(()) } /// Handle a `BlockValid` update from the Validator /// /// The block has been verified by the validator, so mark it as validated in the log and /// attempt to handle the block. pub fn on_block_valid( &mut self, block_id: BlockId, state: &mut PbftState, ) -> Result<(), PbftError> { info!("Got BlockValid: {}", hex::encode(&block_id)); // Mark block as validated in the log and get the block let block = self .msg_log .block_validated(block_id.clone()) .ok_or_else(|| { PbftError::InvalidMessage(format!( "Received BlockValid message for an unknown block: {}", hex::encode(&block_id) )) })?; self.try_handling_block(block, state) } /// Validate the block's seal and handle the block. If this is the block the node is waiting /// for and this node is the primary, broadcast a PrePrepare; if the node isn't the primary but /// it already has the PrePrepare for this block, switch to `Preparing`. If this is a future /// block, use it to catch up. fn try_handling_block(&mut self, block: Block, state: &mut PbftState) -> Result<(), PbftError> { // If the block's number is higher than the current sequence number + 1 (i.e., it is newer // than the grandchild of the last committed block), the seal cannot be verified; this is // because the settings in a block's grandparent are needed to verify the block's seal, and // these settings are only guaranteed to be in the validator's state when the block is // committed. If this is a newer block, wait until after the grandparent is committed // before validating the seal and handling the block. if block.block_num > state.seq_num + 1 { return Ok(()); } let seal = self .verify_consensus_seal_from_block(&block, state) .map_err(|err| { self.service .fail_block(block.block_id.clone()) .unwrap_or_else(|err| error!("Couldn't fail block due to error: {:?}", err)); PbftError::InvalidMessage(format!( "Consensus seal failed verification - Error was: {}", err )) })?; // This block's seal can be used to commit the block previous to it (i.e. catch-up) if it's // a future block and the node isn't waiting for a commit message for a previous block (if // it is waiting for a commit message, catch-up will have to be done after the message is // received) let is_waiting = match state.phase { PbftPhase::Finishing(_) => true, _ => false, }; if block.block_num > state.seq_num && !is_waiting { self.catchup(state, &seal, true)?; } else if block.block_num == state.seq_num { if block.signer_id == state.id && state.is_primary() { // This is the next block and this node is the primary; broadcast PrePrepare // messages info!("Broadcasting PrePrepares"); self.broadcast_pbft_message( state.view, state.seq_num, PbftMessageType::PrePrepare, block.block_id, state, )?; } else { // If the node is in the PrePreparing phase and it already has a PrePrepare for // this block: switch to Preparing self.try_preparing(block.block_id, state)?; } } Ok(()) } /// Handle a `BlockInvalid` update from the Validator /// /// The block is invalid, so drop it from the log and fail it. 
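    ///
    /// Sketch of the expected behavior (hypothetical IDs; `service` is the tests' MockService):
    ///
    /// ```ignore
    /// node.on_block_new(block, &mut state)?;     // block is logged as unvalidated
    /// node.on_block_invalid(block_id)?;          // block is dropped from the log...
    /// assert!(service.was_called("fail_block")); // ...and failed on the validator
    /// ```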
    pub fn on_block_invalid(&mut self, block_id: BlockId) -> Result<(), PbftError> {
        info!("Got BlockInvalid: {}", hex::encode(&block_id));

        // Drop block from the log
        if !self.msg_log.block_invalidated(block_id.clone()) {
            return Err(PbftError::InvalidMessage(format!(
                "Received BlockInvalid message for an unknown block: {}",
                hex::encode(&block_id)
            )));
        }

        // Fail the block
        self.service
            .fail_block(block_id)
            .unwrap_or_else(|err| error!("Couldn't fail block due to error: {:?}", err));

        Ok(())
    }

    /// Use the given consensus seal to verify and commit the block this node is working on
    fn catchup(
        &mut self,
        state: &mut PbftState,
        seal: &PbftSeal,
        catchup_again: bool,
    ) -> Result<(), PbftError> {
        info!(
            "{}: Attempting to commit block {} using catch-up",
            state, state.seq_num
        );

        let messages = seal
            .get_commit_votes()
            .iter()
            .try_fold(Vec::new(), |mut msgs, vote| {
                msgs.push(ParsedMessage::from_signed_vote(vote)?);
                Ok(msgs)
            })?;

        // Update view if necessary
        let view = messages[0].info().get_view();
        if view != state.view {
            info!("Updating view from {} to {}", state.view, view);
            state.view = view;
        }

        // Add messages to the log
        for message in &messages {
            self.msg_log.add_message(message.clone());
        }

        // Commit the block, stop the idle timeout, and skip straight to Finishing
        self.service
            .commit_block(seal.block_id.clone())
            .map_err(|err| {
                PbftError::ServiceError(
                    format!(
                        "Failed to commit block with catch-up {:?} / {:?}",
                        state.seq_num,
                        hex::encode(&seal.block_id)
                    ),
                    err,
                )
            })?;
        state.idle_timeout.stop();
        state.phase = PbftPhase::Finishing(catchup_again);

        Ok(())
    }

    /// Handle a `BlockCommit` update from the Validator
    ///
    /// A block was successfully committed; clean up any uncommitted blocks, update state to be
    /// ready for the next block, make any necessary view and membership changes, garbage collect
    /// the logs, and start a new block if this node is the primary.
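    ///
    /// The net effect on the consensus state, sketched with hypothetical values:
    ///
    /// ```ignore
    /// let old_seq_num = state.seq_num;
    /// node.on_block_commit(block_id.clone(), &mut state)?;
    /// assert_eq!(state.seq_num, old_seq_num + 1); // ready for the next block
    /// assert_eq!(state.chain_head, block_id);
    /// // state.phase is reset to PrePreparing (and may immediately advance to Preparing
    /// // if a valid PrePrepare for the next block is already in the log)
    /// ```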
    pub fn on_block_commit(
        &mut self,
        block_id: BlockId,
        state: &mut PbftState,
    ) -> Result<(), PbftError> {
        info!("{}: Got BlockCommit for {}", state, hex::encode(&block_id));

        let is_catching_up = match state.phase {
            PbftPhase::Finishing(true) => true,
            _ => false,
        };

        // If there are any blocks in the log at this sequence number other than the one that was
        // just committed, reject them
        let invalid_block_ids = self
            .msg_log
            .get_blocks_with_num(state.seq_num)
            .iter()
            .filter_map(|block| {
                if block.block_id != block_id {
                    Some(block.block_id.clone())
                } else {
                    None
                }
            })
            .collect::<Vec<_>>();

        for id in invalid_block_ids {
            self.service.fail_block(id.clone()).unwrap_or_else(|err| {
                error!(
                    "Couldn't fail block {:?} due to error: {:?}",
                    &hex::encode(id),
                    err
                )
            });
        }

        // Increment sequence number and update state
        state.seq_num += 1;
        state.mode = PbftMode::Normal;
        state.phase = PbftPhase::PrePreparing;
        state.chain_head = block_id.clone();

        // If node(s) are waiting for a seal to commit the last block, send it now
        let requesters = self
            .msg_log
            .get_messages_of_type_seq(PbftMessageType::SealRequest, state.seq_num - 1)
            .iter()
            .map(|req| req.info().get_signer_id().to_vec())
            .collect::<Vec<_>>();
        for req in requesters {
            self.send_seal_response(state, &req).unwrap_or_else(|err| {
                error!("Failed to send seal response due to: {:?}", err);
            });
        }

        // Update membership if necessary
        self.update_membership(block_id.clone(), state);

        // Increment the view if a view change must be forced for fairness
        if state.at_forced_view_change() {
            state.view += 1;
        }

        // Tell the log to garbage collect if it needs to
        self.msg_log.garbage_collect(state.seq_num);

        // If the node already has grandchild(ren) of the block that was just committed, one of
        // them may be used to perform catch-up to commit the next block.
        let grandchildren = self
            .msg_log
            .get_blocks_with_num(state.seq_num + 1)
            .iter()
            .cloned()
            .cloned()
            .collect::<Vec<_>>();
        for block in grandchildren {
            if self.try_handling_block(block, state).is_ok() {
                return Ok(());
            }
        }

        // If the node is catching up but doesn't have a block with a seal to commit the next one,
        // it will need to request the seal to commit the last block. The node doesn't know which
        // block that the network decided to commit, so it can't request the seal for a specific
        // block (puts an empty BlockId in the message)
        if is_catching_up {
            info!(
                "{}: Requesting seal to finish catch-up to block {}",
                state, state.seq_num
            );
            return self.broadcast_pbft_message(
                state.view,
                state.seq_num,
                PbftMessageType::SealRequest,
                BlockId::new(),
                state,
            );
        }

        // Start the idle timeout for the next block
        state.idle_timeout.start();

        // If we already have a block at this sequence number with a valid PrePrepare for it, start
        // Preparing (there may be multiple blocks, but only one will have a valid PrePrepare)
        let block_ids = self
            .msg_log
            .get_blocks_with_num(state.seq_num)
            .iter()
            .map(|block| block.block_id.clone())
            .collect::<Vec<_>>();
        for id in block_ids {
            self.try_preparing(id, state)?;
        }

        // Initialize a new block if this node is the primary and it is not in the process of
        // catching up
        if state.is_primary() {
            info!(
                "{}: Initializing block on top of {}",
                state,
                hex::encode(&block_id)
            );
            self.service
                .initialize_block(Some(block_id))
                .map_err(|err| {
                    PbftError::ServiceError("Couldn't initialize block after commit".into(), err)
                })?;
        }

        Ok(())
    }

    /// Check the on-chain list of members; if it has changed, update the state's list of members.
    ///
    /// # Panics
    /// + If the `sawtooth.consensus.pbft.members` setting is unset or invalid
    /// + If the network this node is on does not have enough nodes to be Byzantine fault tolerant
    fn update_membership(&mut self, block_id: BlockId, state: &mut PbftState) {
        // Get list of members from settings (retry until a valid result is received)
        trace!("Getting on-chain list of members to check for membership updates");
        let settings = retry_until_ok(
            state.exponential_retry_base,
            state.exponential_retry_max,
            || {
                self.service.get_settings(
                    block_id.clone(),
                    vec![String::from("sawtooth.consensus.pbft.members")],
                )
            },
        );
        let on_chain_members = get_members_from_settings(&settings);
        if on_chain_members != state.member_ids {
            info!("Updating membership: {:?}", on_chain_members);
            state.member_ids = on_chain_members;
            let f = (state.member_ids.len() - 1) / 3;
            if f == 0 {
                panic!("This network no longer contains enough nodes to be fault tolerant");
            }
            state.f = f as u64;
        }
    }

    /// When the node has a block and a corresponding PrePrepare for its current sequence number,
    /// and it is in the PrePreparing phase, it can enter the Preparing phase and broadcast its
    /// Prepare
    fn try_preparing(&mut self, block_id: BlockId, state: &mut PbftState) -> Result<(), PbftError> {
        if let Some(block) = self.msg_log.get_block_with_id(&block_id) {
            if state.phase == PbftPhase::PrePreparing
                && self.msg_log.has_pre_prepare(state.seq_num, state.view, &block_id)
                // PrePrepare.seq_num == state.seq_num == block.block_num enforces the one-to-one
                // correlation between seq_num and block_num (PrePrepare n should be for block n)
                && block.block_num == state.seq_num
            {
                state.switch_phase(PbftPhase::Preparing)?;

                // Stop idle timeout, since a new block and valid PrePrepare were received in time
                state.idle_timeout.stop();

                // Now start the commit timeout in case the network fails to commit the block
                // within a reasonable amount of time
                state.commit_timeout.start();

                // The primary doesn't broadcast a Prepare; its PrePrepare counts as its "vote"
                if !state.is_primary() {
                    self.broadcast_pbft_message(
                        state.view,
                        state.seq_num,
                        PbftMessageType::Prepare,
                        block_id,
                        state,
                    )?;
                }
            }
        }

        Ok(())
    }

    /// Handle a `PeerConnected` update from the Validator
    ///
    /// A peer has just connected to this node. Send a bootstrap commit message if the peer is part
    /// of the network and the node isn't at the genesis block.
    pub fn on_peer_connected(
        &mut self,
        peer_id: PeerId,
        state: &mut PbftState,
    ) -> Result<(), PbftError> {
        // Ignore if the peer is not a member of the PBFT network or the chain head is block 0
        if !state.member_ids.contains(&peer_id) || state.seq_num == 1 {
            return Ok(());
        }

        self.broadcast_bootstrap_commit(peer_id, state)
    }

    /// When the whole network is starting "fresh" from a non-genesis block, none of the nodes will
    /// have the `Commit` messages necessary to build the consensus seal for the last committed
    /// block (the chain head). To bootstrap the network in this scenario, all nodes will send a
    /// `Commit` message for their chain head whenever one of the PBFT members connects; when
    /// > 2f + 1 nodes have connected and received these `Commit` messages, the nodes will be able
    /// to build a seal using the messages.
    fn broadcast_bootstrap_commit(
        &mut self,
        peer_id: PeerId,
        state: &mut PbftState,
    ) -> Result<(), PbftError> {
        // The network must agree on a single view number for the Commit messages, so the view
        // of the chain head's predecessor is used.
For block 1 this is view 0; otherwise, it's the // view of the block's consensus seal let view = if state.seq_num == 2 { 0 } else { self.msg_log .get_block_with_id(&state.chain_head) .ok_or_else(|| { PbftError::InternalError(format!( "Node does not have chain head ({:?}) in its log", state.chain_head )) }) .and_then(|block| { protobuf::parse_from_bytes::<PbftSeal>(&block.payload).map_err(|err| { PbftError::SerializationError( "Error parsing seal from chain head".into(), err, ) }) })? .get_info() .get_view() }; // Construct the commit message for the chain head and send it to the connected peer let mut commit = PbftMessage::new(); commit.set_info(PbftMessageInfo::new_from( PbftMessageType::Commit, view, state.seq_num - 1, state.id.clone(), )); commit.set_block_id(state.chain_head.clone()); let bytes = commit.write_to_bytes().map_err(|err| { PbftError::SerializationError("Error writing commit to bytes".into(), err) })?; self.service .send_to( &peer_id, String::from(PbftMessageType::Commit).as_str(), bytes, ) .map_err(|err| { PbftError::ServiceError( format!("Failed to send Commit to {:?}", hex::encode(peer_id)), err, ) }) } // ---------- Methods for building & verifying proofs and signed messages from other nodes ---------- /// Generate a `protobuf::RepeatedField` of signed votes from a list of parsed messages fn signed_votes_from_messages(msgs: &[&ParsedMessage]) -> RepeatedField<PbftSignedVote> { RepeatedField::from( msgs.iter() .map(|m| { let mut vote = PbftSignedVote::new(); vote.set_header_bytes(m.header_bytes.clone()); vote.set_header_signature(m.header_signature.clone()); vote.set_message_bytes(m.message_bytes.clone()); vote }) .collect::<Vec<_>>(), ) } /// Build a consensus seal that proves the last block committed by this node fn build_seal(&self, state: &PbftState) -> Result<PbftSeal, PbftError> { trace!("{}: Building seal for block {}", state, state.seq_num - 1); // The previous block may have been committed in a different view, so the node will need to // find the view that contains the required 2f Commit messages for building the seal let (block_id, view, messages) = self .msg_log .get_messages_of_type_seq(PbftMessageType::Commit, state.seq_num - 1) .iter() // Filter out this node's own messages because self-sent messages aren't signed and // therefore can't be included in the seal .filter(|msg| !msg.from_self) .cloned() // Map to ((block_id, view), msg) .map(|msg| ((msg.get_block_id(), msg.info().get_view()), msg)) // Group messages together by block and view .into_group_map() .into_iter() // One and only one block/view should have the required number of messages, since only // one block at this sequence number should have been committed and in only one view .find_map(|((block_id, view), msgs)| { if msgs.len() as u64 >= 2 * state.f { Some((block_id, view, msgs)) } else { None } }) .ok_or_else(|| { PbftError::InternalError(String::from( "Couldn't find 2f commit messages in the message log for building a seal", )) })?; let mut seal = PbftSeal::new(); seal.set_info(PbftMessageInfo::new_from( PbftMessageType::Seal, view, state.seq_num - 1, state.id.clone(), )); seal.set_block_id(block_id); seal.set_commit_votes(Self::signed_votes_from_messages(messages.as_slice())); trace!("Seal created: {:?}", seal); Ok(seal) } /// Verify that a vote matches the expected type, is properly signed, and passes the specified /// criteria; if it passes verification, return the signer ID to be used for further /// verification fn verify_vote<F>( vote: &PbftSignedVote, expected_type: PbftMessageType, 
        validation_criteria: F,
    ) -> Result<PeerId, PbftError>
    where
        F: Fn(&PbftMessage) -> Result<(), PbftError>,
    {
        // Parse the message
        let pbft_message: PbftMessage = protobuf::parse_from_bytes(&vote.get_message_bytes())
            .map_err(|err| {
                PbftError::SerializationError("Error parsing PbftMessage from vote".into(), err)
            })?;
        let header: ConsensusPeerMessageHeader =
            protobuf::parse_from_bytes(&vote.get_header_bytes()).map_err(|err| {
                PbftError::SerializationError("Error parsing header from vote".into(), err)
            })?;

        trace!(
            "Verifying vote with PbftMessage: {:?} and header: {:?}",
            pbft_message,
            header
        );

        // Verify the header's signer matches the PbftMessage's signer
        if header.signer_id != pbft_message.get_info().get_signer_id() {
            return Err(PbftError::InvalidMessage(format!(
                "Received a vote where PbftMessage's signer ID ({:?}) and PeerMessage's signer ID \
                 ({:?}) don't match",
                pbft_message.get_info().get_signer_id(),
                header.signer_id
            )));
        }

        // Verify the message type
        let msg_type = PbftMessageType::from(pbft_message.get_info().get_msg_type());
        if msg_type != expected_type {
            return Err(PbftError::InvalidMessage(format!(
                "Received a {:?} vote, but expected a {:?}",
                msg_type, expected_type
            )));
        }

        // Verify the signature
        let key = Secp256k1PublicKey::from_hex(&hex::encode(&header.signer_id)).map_err(|err| {
            PbftError::SigningError(format!(
                "Couldn't parse public key from signer ID ({:?}) due to error: {:?}",
                header.signer_id, err
            ))
        })?;
        let context = create_context("secp256k1").map_err(|err| {
            PbftError::SigningError(format!("Couldn't create context due to error: {}", err))
        })?;

        match context.verify(
            &hex::encode(vote.get_header_signature()),
            vote.get_header_bytes(),
            &key,
        ) {
            Ok(true) => {}
            Ok(false) => {
                return Err(PbftError::SigningError(format!(
                    "Vote ({}) failed signature verification",
                    vote
                )));
            }
            Err(err) => {
                return Err(PbftError::SigningError(format!(
                    "Error while verifying vote signature: {:?}",
                    err
                )));
            }
        }

        verify_sha512(vote.get_message_bytes(), header.get_content_sha512())?;

        // Validate against the specified criteria
        validation_criteria(&pbft_message)?;

        Ok(PeerId::from(pbft_message.get_info().get_signer_id()))
    }

    /// Verify that a NewView message is valid
    fn verify_new_view(
        &mut self,
        new_view: &PbftNewView,
        state: &mut PbftState,
    ) -> Result<(), PbftError> {
        // Make sure this is for a future view (prevents re-using old NewView messages)
        if new_view.get_info().get_view() <= state.view {
            return Err(PbftError::InvalidMessage(format!(
                "Node is on view {}, but received NewView message for view {}",
                state.view,
                new_view.get_info().get_view(),
            )));
        }

        // Make sure this is from the new primary
        if PeerId::from(new_view.get_info().get_signer_id())
            != state.get_primary_id_at_view(new_view.get_info().get_view())
        {
            return Err(PbftError::InvalidMessage(format!(
                "Received NewView message for view {} that is not from the primary for that view",
                new_view.get_info().get_view()
            )));
        }

        // Verify each individual vote and extract the signer ID from each ViewChange so the IDs
        // can be verified
        let voter_ids = new_view
            .get_view_changes()
            .iter()
            .try_fold(HashSet::new(), |mut ids, vote| {
                Self::verify_vote(vote, PbftMessageType::ViewChange, |msg| {
                    if msg.get_info().get_view() != new_view.get_info().get_view() {
                        return Err(PbftError::InvalidMessage(format!(
                            "ViewChange's view number ({}) doesn't match NewView's view \
                             number ({})",
                            msg.get_info().get_view(),
                            new_view.get_info().get_view(),
                        )));
                    }
                    Ok(())
                })
                .map(|id| ids.insert(id))?;
                Ok(ids)
            })?;

        // All of the votes must come from PBFT members, and the primary can't explicitly vote
        // itself, since broadcasting the NewView is an implicit vote. Check that the votes
        // received are from a subset of "members - primary".
        let peer_ids: HashSet<_> = state
            .member_ids
            .iter()
            .cloned()
            .filter(|pid| pid != &PeerId::from(new_view.get_info().get_signer_id()))
            .collect();

        trace!(
            "Comparing voter IDs ({:?}) with member IDs - primary ({:?})",
            voter_ids,
            peer_ids
        );

        if !voter_ids.is_subset(&peer_ids) {
            return Err(PbftError::InvalidMessage(format!(
                "NewView contains vote(s) from invalid IDs: {:?}",
                voter_ids.difference(&peer_ids).collect::<Vec<_>>()
            )));
        }

        // Check that the NewView contains 2f votes (primary vote is implicit, so total of 2f + 1)
        if (voter_ids.len() as u64) < 2 * state.f {
            return Err(PbftError::InvalidMessage(format!(
                "NewView needs {} votes, but only {} found",
                2 * state.f,
                voter_ids.len()
            )));
        }

        Ok(())
    }

    /// Verify the consensus seal from the current block that proves the previous block and return
    /// the parsed seal
    fn verify_consensus_seal_from_block(
        &mut self,
        block: &Block,
        state: &mut PbftState,
    ) -> Result<PbftSeal, PbftError> {
        // Since block 0 is genesis, block 1 is the first that can be verified with a seal; this
        // means that the node won't see a seal until block 2
        if block.block_num < 2 {
            return Ok(PbftSeal::new());
        }

        // Parse the seal
        if block.payload.is_empty() {
            return Err(PbftError::InvalidMessage(
                "Block published without a seal".into(),
            ));
        }

        let seal: PbftSeal = protobuf::parse_from_bytes(&block.payload).map_err(|err| {
            PbftError::SerializationError("Error parsing seal for verification".into(), err)
        })?;

        trace!("Parsed seal: {}", seal);

        // Make sure this is the correct seal for the previous block
        if seal.block_id != &block.previous_id[..] {
            return Err(PbftError::InvalidMessage(format!(
                "Seal's ID ({}) doesn't match block's previous ID ({})",
                hex::encode(&seal.block_id),
                hex::encode(&block.previous_id)
            )));
        }

        // Get the previous ID of the block this seal is supposed to prove so it can be used to
        // verify the seal
        let proven_block_previous_id = self
            .msg_log
            .get_block_with_id(seal.block_id.as_slice())
            .map(|proven_block| proven_block.previous_id.clone())
            .ok_or_else(|| {
                PbftError::InternalError(format!(
                    "Got seal for block {:?}, but block was not found in the log",
                    seal.block_id,
                ))
            })?;

        // Verify the seal itself
        self.verify_consensus_seal(&seal, proven_block_previous_id, state)?;

        Ok(seal)
    }

    /// Verify the given consensus seal
    ///
    /// # Panics
    /// + If the `sawtooth.consensus.pbft.members` setting is unset or invalid
    fn verify_consensus_seal(
        &mut self,
        seal: &PbftSeal,
        previous_id: BlockId,
        state: &mut PbftState,
    ) -> Result<(), PbftError> {
        // Verify each individual vote and extract the signer ID from each PbftMessage so the IDs
        // can be verified
        let voter_ids = seal
            .get_commit_votes()
            .iter()
            .try_fold(HashSet::new(), |mut ids, vote| {
                Self::verify_vote(vote, PbftMessageType::Commit, |msg| {
                    // Make sure all votes are for the right block
                    if msg.block_id != seal.block_id {
                        return Err(PbftError::InvalidMessage(format!(
                            "Commit vote's block ID ({:?}) doesn't match seal's ID ({:?})",
                            msg.block_id, seal.block_id
                        )));
                    }
                    // Make sure all votes are for the right view
                    if msg.get_info().get_view() != seal.get_info().get_view() {
                        return Err(PbftError::InvalidMessage(format!(
                            "Commit vote's view ({:?}) doesn't match seal's view ({:?})",
                            msg.get_info().get_view(),
                            seal.get_info().get_view()
                        )));
                    }
                    // Make sure all votes are for the right sequence number
                    if msg.get_info().get_seq_num() != seal.get_info().get_seq_num() {
return Err(PbftError::InvalidMessage(format!( "Commit vote's seq_num ({:?}) doesn't match seal's seq_num ({:?})", msg.get_info().get_seq_num(), seal.get_info().get_seq_num() ))); } Ok(()) }) .map(|id| ids.insert(id))?; Ok(ids) })?; // All of the votes in a seal must come from PBFT members, and the primary can't explicitly // vote itself, since building a consensus seal is an implicit vote. Check that the votes // received are from a subset of "members - seal creator". Use the list of members from the // block previous to the one this seal verifies, since that represents the state of the // network at the time this block was voted on. trace!("Getting on-chain list of members to verify seal"); let settings = retry_until_ok( state.exponential_retry_base, state.exponential_retry_max, || { self.service.get_settings( previous_id.clone(), vec![String::from("sawtooth.consensus.pbft.members")], ) }, ); let members = get_members_from_settings(&settings); // Verify that the seal's signer is a PBFT member if !members.contains(&seal.get_info().get_signer_id().to_vec()) { return Err(PbftError::InvalidMessage(format!( "Consensus seal is signed by an unknown peer: {:?}", seal.get_info().get_signer_id() ))); } let peer_ids: HashSet<_> = members .iter() .cloned() .filter(|pid| pid.as_slice() != seal.get_info().get_signer_id()) .collect(); trace!( "Comparing voter IDs ({:?}) with on-chain member IDs - primary ({:?})", voter_ids, peer_ids ); if !voter_ids.is_subset(&peer_ids) { return Err(PbftError::InvalidMessage(format!( "Consensus seal contains vote(s) from invalid ID(s): {:?}", voter_ids.difference(&peer_ids).collect::<Vec<_>>() ))); } // Check that the seal contains 2f votes (primary vote is implicit, so total of 2f + 1) if (voter_ids.len() as u64) < 2 * state.f { return Err(PbftError::InvalidMessage(format!( "Consensus seal needs {} votes, but only {} found", 2 * state.f, voter_ids.len() ))); } Ok(()) } // ---------- Methods called in the main engine loop to periodically check and update state ---------- /// At a regular interval, try to finalize a block when the primary is ready pub fn try_publish(&mut self, state: &mut PbftState) -> Result<(), PbftError> { // Only the primary takes care of this, and we try publishing a block // on every engine loop, even if it's not yet ready. This isn't an error, // so just return Ok(()). if !state.is_primary() || state.phase != PbftPhase::PrePreparing { return Ok(()); } trace!("{}: Attempting to summarize block", state); match self.service.summarize_block() { Ok(_) => {} Err(err) => { trace!("Couldn't summarize, so not finalizing: {}", err); return Ok(()); } } // We don't publish a consensus seal at block 1, since we never receive any // votes on the genesis block. Leave payload blank for the first block. let data = if state.seq_num <= 1 { vec![] } else { self.build_seal(state)?.write_to_bytes().map_err(|err| { PbftError::SerializationError("Error writing seal to bytes".into(), err) })? 
}; match self.service.finalize_block(data) { Ok(block_id) => { info!("{}: Publishing block {}", state, hex::encode(&block_id)); Ok(()) } Err(err) => Err(PbftError::ServiceError( "Couldn't finalize block".into(), err, )), } } /// Check to see if the idle timeout has expired pub fn check_idle_timeout_expired(&mut self, state: &mut PbftState) -> bool { state.idle_timeout.check_expired() } /// Start the idle timeout pub fn start_idle_timeout(&self, state: &mut PbftState) { state.idle_timeout.start(); } /// Check to see if the commit timeout has expired pub fn check_commit_timeout_expired(&mut self, state: &mut PbftState) -> bool { state.commit_timeout.check_expired() } /// Start the commit timeout pub fn start_commit_timeout(&self, state: &mut PbftState) { state.commit_timeout.start(); } /// Check to see if the view change timeout has expired pub fn check_view_change_timeout_expired(&mut self, state: &mut PbftState) -> bool { state.view_change_timeout.check_expired() } // ---------- Methods for communication between nodes ---------- /// Construct a PbftMessage message and broadcast it to all peers (including self) fn broadcast_pbft_message( &mut self, view: u64, seq_num: u64, msg_type: PbftMessageType, block_id: BlockId, state: &mut PbftState, ) -> Result<(), PbftError> { let mut msg = PbftMessage::new(); msg.set_info(PbftMessageInfo::new_from( msg_type, view, seq_num, state.id.clone(), )); msg.set_block_id(block_id); trace!("{}: Created PBFT message: {:?}", state, msg); self.broadcast_message(ParsedMessage::from_pbft_message(msg)?, state) } /// Broadcast the specified message to all of the node's peers, including itself fn broadcast_message( &mut self, msg: ParsedMessage, state: &mut PbftState, ) -> Result<(), PbftError> { // Broadcast to peers self.service .broadcast( String::from(msg.info().get_msg_type()).as_str(), msg.message_bytes.clone(), ) .unwrap_or_else(|err| { error!( "Couldn't broadcast message ({:?}) due to error: {}", msg, err ) }); // Send to self self.on_peer_message(msg, state) } /// Build a consensus seal for the last block this node committed and send it to the node that /// requested the seal (the `recipient`) #[allow(clippy::ptr_arg)] fn send_seal_response( &mut self, state: &PbftState, recipient: &PeerId, ) -> Result<(), PbftError> { let seal = self.build_seal(state).map_err(|err| { PbftError::InternalError(format!("Failed to build requested seal due to: {}", err)) })?; let msg_bytes = seal.write_to_bytes().map_err(|err| { PbftError::SerializationError("Error writing seal to bytes".into(), err) })?; // Send the seal to the requester self.service .send_to( recipient, String::from(PbftMessageType::Seal).as_str(), msg_bytes, ) .map_err(|err| { PbftError::ServiceError( format!( "Failed to send requested seal to {:?}", hex::encode(recipient) ), err, ) }) } // ---------- Miscellaneous methods ---------- /// Start a view change when this node suspects that the primary is faulty /// /// Update state to reflect that the node is now in the process of this view change, start the /// view change timeout, and broadcast a view change message /// /// # Panics /// + If the view change timeout overflows pub fn start_view_change(&mut self, state: &mut PbftState, view: u64) -> Result<(), PbftError> { // Do not send messages again if we are already in the midst of this or a later view change if match state.mode { PbftMode::ViewChanging(v) => view <= v, _ => false, } { return Ok(()); } info!("{}: Starting change to view {}", state, view); state.mode = PbftMode::ViewChanging(view); // Stop the 
idle and commit timeouts because they are not needed until after the view // change state.idle_timeout.stop(); state.commit_timeout.stop(); // Stop the view change timeout if it is already active (will be restarted when 2f + 1 // ViewChange messages for the new view are received) state.view_change_timeout.stop(); // Broadcast the view change message self.broadcast_pbft_message( view, state.seq_num - 1, PbftMessageType::ViewChange, BlockId::new(), state, ) } } #[cfg(test)] mod tests { use super::*; use crate::engine::test_handle_update; use crate::hash::hash_sha512; use crate::message_type::PbftMessageWrapper; use crate::protos::pbft_message::PbftMessageInfo; use crate::test_helpers::*; use sawtooth_sdk::consensus::engine::{Error, PeerId, PeerMessage, Update}; use sawtooth_sdk::signing::secp256k1::Secp256k1PrivateKey; use serde_json; use std::cell::RefCell; use std::collections::HashMap; use std::default::Default; use std::rc::Rc; /// Turns a series of items into a `Vec<String>` for easily tracking and checking for function /// calls to the MockService macro_rules! stringify_func_call { ( $( $x:expr ),* ) => { { let mut output = Vec::new(); $( output.push(format!("{:?}", $x)); )* output } } } /// Implementation of the consensus' `Service` trait that's used to mock out interactions with /// the Sawtooth validator. The `MockService` will track calls to its methods and supports /// configurable return values for some of its methods. #[derive(Clone)] struct MockService { /// A list of function calls, where each function call is a list of the form (func_name, /// arg1, arg2, ...) calls: Rc<RefCell<Vec<Vec<String>>>>, /// For each block ID, the settings value to return when `get_settings` is called settings: Rc<RefCell<HashMap<BlockId, HashMap<String, String>>>>, /// Determines the return value of the `summarize_block` method summarize_block_return_val: Rc<RefCell<Result<Vec<u8>, Error>>>, } impl MockService { /// Create a new `MockService` and set the members setting based on the `PbftConfig` fn new(cfg: &PbftConfig) -> Self { let members: Vec<_> = cfg.members.iter().map(hex::encode).collect(); let service = MockService { calls: Default::default(), settings: Default::default(), summarize_block_return_val: Rc::new(RefCell::new(Ok(Default::default()))), }; // Set the default settings let mut default_settings = HashMap::new(); default_settings.insert( "sawtooth.consensus.pbft.members".to_string(), serde_json::to_string(&members).unwrap(), ); service .settings .borrow_mut() .insert(vec![0], default_settings); service } /// Indicates if the specified method was called fn was_called(&self, method_name: &str) -> bool { self.calls .borrow() .iter() .any(|call| call[0] == format!("{:?}", method_name)) } /// Indicates if the specified method was called with the given arguments (allows partial /// args) fn was_called_with_args(&self, call: Vec<String>) -> bool { self.calls .borrow() .iter() .any(|logged_call| logged_call.starts_with(&call)) } /// Indicates if the specified method was called with the given arguments only once (allows /// partial args) fn was_called_with_args_once(&self, call: Vec<String>) -> bool { self.calls .borrow() .iter() .filter(|logged_call| logged_call.starts_with(&call)) .count() == 1 } } impl Service for MockService { fn send_to( &mut self, peer: &PeerId, message_type: &str, payload: Vec<u8>, ) -> Result<(), Error> { self.calls.borrow_mut().push(stringify_func_call!( "send_to", peer, message_type, payload )); Ok(()) } fn broadcast(&mut self, message_type: &str, payload: Vec<u8>) 
-> Result<(), Error> {
            self.calls
                .borrow_mut()
                .push(stringify_func_call!("broadcast", message_type, payload));
            Ok(())
        }

        fn initialize_block(&mut self, previous_id: Option<BlockId>) -> Result<(), Error> {
            self.calls
                .borrow_mut()
                .push(stringify_func_call!("initialize_block", previous_id));
            Ok(())
        }

        fn summarize_block(&mut self) -> Result<Vec<u8>, Error> {
            self.calls
                .borrow_mut()
                .push(stringify_func_call!("summarize_block"));
            self.summarize_block_return_val
                .replace(Ok(Default::default()))
        }

        fn finalize_block(&mut self, data: Vec<u8>) -> Result<BlockId, Error> {
            self.calls
                .borrow_mut()
                .push(stringify_func_call!("finalize_block", data));
            Ok(Default::default())
        }

        fn cancel_block(&mut self) -> Result<(), Error> {
            self.calls
                .borrow_mut()
                .push(stringify_func_call!("cancel_block"));
            Ok(())
        }

        fn check_blocks(&mut self, priority: Vec<BlockId>) -> Result<(), Error> {
            self.calls
                .borrow_mut()
                .push(stringify_func_call!("check_blocks", priority));
            Ok(())
        }

        fn commit_block(&mut self, block_id: BlockId) -> Result<(), Error> {
            self.calls
                .borrow_mut()
                .push(stringify_func_call!("commit_block", block_id));
            Ok(())
        }

        fn ignore_block(&mut self, block_id: BlockId) -> Result<(), Error> {
            self.calls
                .borrow_mut()
                .push(stringify_func_call!("ignore_block", block_id));
            Ok(())
        }

        fn fail_block(&mut self, block_id: BlockId) -> Result<(), Error> {
            self.calls
                .borrow_mut()
                .push(stringify_func_call!("fail_block", block_id));
            Ok(())
        }

        fn get_blocks(
            &mut self,
            block_ids: Vec<BlockId>,
        ) -> Result<HashMap<BlockId, Block>, Error> {
            self.calls
                .borrow_mut()
                .push(stringify_func_call!("get_blocks", block_ids));
            Ok(Default::default())
        }

        fn get_chain_head(&mut self) -> Result<Block, Error> {
            self.calls
                .borrow_mut()
                .push(stringify_func_call!("get_chain_head"));
            Ok(Default::default())
        }

        fn get_settings(
            &mut self,
            block_id: BlockId,
            settings: Vec<String>,
        ) -> Result<HashMap<String, String>, Error> {
            self.calls
                .borrow_mut()
                .push(stringify_func_call!("get_settings", block_id, settings));
            let settings = self.settings.borrow();
            Ok(settings
                .get(&block_id)
                .unwrap_or_else(|| {
                    // Fall back to default settings (in block 0)
                    settings.get(&vec![0]).expect("Default settings not set")
                })
                .clone())
        }

        fn get_state(
            &mut self,
            block_id: BlockId,
            addresses: Vec<String>,
        ) -> Result<HashMap<String, Vec<u8>>, Error> {
            self.calls
                .borrow_mut()
                .push(stringify_func_call!("get_state", block_id, addresses));
            Ok(Default::default())
        }
    }

    struct KeyPair {
        pub_key: Vec<u8>,
        priv_key: Vec<u8>,
    }

    /// Create a list of public/private key pairs for the specified number of nodes
    fn mock_signer_network(size: u8) -> Vec<KeyPair> {
        let context = create_context("secp256k1").expect("Failed to create context");
        (0..size)
            .map(|_| {
                let priv_key = context
                    .new_random_private_key()
                    .expect("Failed to generate new private key");
                let pub_key = context
                    .get_public_key(&*priv_key)
                    .expect("Failed to get public key");
                KeyPair {
                    pub_key: pub_key.as_slice().to_vec(),
                    priv_key: priv_key.as_slice().to_vec(),
                }
            })
            .collect()
    }

    /// Create a mock configuration for the list of signers generated by `mock_signer_network`
    fn mock_config_from_signer_network(keys: &[KeyPair]) -> PbftConfig {
        let mut config = PbftConfig::default();
        config.members = keys
            .iter()
            .map(|key_pair| key_pair.pub_key.clone())
            .collect();
        config
    }

    /// Create a new PbftNode, PbftState, and MockService based on the given config, node ID, and
    /// chain head
    fn mock_node(
        cfg: &PbftConfig,
        node_id: PeerId,
        chain_head: Block,
    ) -> (PbftNode, PbftState, MockService) {
        let mut state =
PbftState::new(node_id.clone(), chain_head.block_num, cfg); let service = MockService::new(cfg); ( PbftNode::new( cfg, chain_head.clone(), vec![], Box::new(service.clone()), &mut state, ), state, service, ) } /// Create a validly-signed PbftSignedVote fn mock_vote( msg_type: PbftMessageType, view: u64, seq_num: u64, block_id: BlockId, signer: &KeyPair, ) -> PbftSignedVote { let info = PbftMessageInfo::new_from(msg_type, view, seq_num, signer.pub_key.clone()); let mut msg = PbftMessage::new(); msg.set_info(info); msg.set_block_id(block_id); let msg_bytes = msg .write_to_bytes() .expect("Failed to write msg to bytes for mock vote"); let mut header = ConsensusPeerMessageHeader::new(); header.set_signer_id(signer.pub_key.clone()); header.set_content_sha512(hash_sha512(&msg_bytes)); let header_bytes = header .write_to_bytes() .expect("Failed to write header to bytes"); let header_signature = hex::decode( create_context("secp256k1") .expect("Failed to create context") .sign( &header_bytes, &Secp256k1PrivateKey::from_hex(&hex::encode(signer.priv_key.clone())) .expect("Failed to create private key from hex"), ) .expect("Failed to sign header"), ) .expect("Failed to decode signed header"); let mut vote = PbftSignedVote::new(); vote.set_header_bytes(header_bytes); vote.set_header_signature(header_signature); vote.set_message_bytes(msg_bytes.to_vec()); vote } /// Create a PbftNewView fn mock_new_view( view: u64, seq_num: u64, signer: &KeyPair, votes: Vec<PbftSignedVote>, ) -> PbftNewView { let mut new_view = PbftNewView::new(); new_view.set_info(PbftMessageInfo::new_from( PbftMessageType::NewView, view, seq_num, signer.pub_key.clone(), )); new_view.set_view_changes(RepeatedField::from(votes)); new_view } /// Create a PbftSeal fn mock_seal( view: u64, seq_num: u64, block_id: BlockId, signer: &KeyPair, votes: Vec<PbftSignedVote>, ) -> PbftSeal { let mut seal = PbftSeal::new(); seal.set_info(PbftMessageInfo::new_from( PbftMessageType::Seal, view, seq_num, signer.pub_key.clone(), )); seal.set_block_id(block_id); seal.set_commit_votes(RepeatedField::from(votes)); seal } /// This test will verify that when the `PbftNode::new` method is called, it will return a /// `PbftNode` after performing the following actions: /// /// 1. Add the chain head to the log /// 2. Set the state's chain head to the block ID of the chain head /// 3. If the chain head has a consensus seal, update view to match the seal's /// 4. 
Initialize a new block by calling the `Service::initialize_block` method if the node is /// the primary #[test] fn test_node_init() { // Create chain head with a consensus seal let key_pairs = mock_signer_network(3); let mut head = mock_block(2); head.payload = mock_seal( 1, 1, vec![1], &key_pairs[0], (1..3) .map(|i| mock_vote(PbftMessageType::Commit, 1, 1, vec![1], &key_pairs[i])) .collect::<Vec<_>>(), ) .write_to_bytes() .expect("Failed to write seal to bytes"); // Verify chain head is added to the log, chain head and view are set, and primary calls // Service::initialize_block() let (node1, state1, service1) = mock_node(&mock_config(4), vec![1], head.clone()); assert!(node1.msg_log.get_block_with_id(&head.block_id).is_some()); assert_eq!(vec![2], state1.chain_head); assert_eq!(1, state1.view); assert!(service1.was_called_with_args(stringify_func_call!( "initialize_block", None as Option<BlockId> ))); // Verify non-primary does not call Service::initialize_block() let (_, _, service0) = mock_node(&mock_config(4), vec![0], head.clone()); assert!(!service0.was_called("initialize_block")); } /// To build a valid consensus seal or a valid `NewView` message, nodes must be able to convert /// a series of `ParsedMessage`s into `PbftSignedVote`s that can be included in the protobuf /// messages. The `PbftNode::signed_votes_from_messages` method is responsible for constructing /// a `RepeatedField` protobuf struct that can be placed directly into the `PbftSeal` and /// `PbftNewView` protobuf messages. #[test] fn test_vote_list_construction() { // Create 3 ParsedMessages with different messages, header bytes, and header signatures let mut msg1 = mock_msg(PbftMessageType::Commit, 0, 1, vec![0], vec![1], false); msg1.header_bytes = vec![0, 1, 2]; msg1.header_signature = vec![3, 4, 5]; let mut msg2 = mock_msg(PbftMessageType::Commit, 0, 1, vec![1], vec![1], false); msg2.header_bytes = vec![6, 7, 8]; msg2.header_signature = vec![9, 10, 11]; let mut msg3 = mock_msg(PbftMessageType::Commit, 0, 1, vec![2], vec![1], false); msg3.header_bytes = vec![12, 13, 14]; msg3.header_signature = vec![15, 16, 17]; let msgs = vec![&msg1, &msg2, &msg3]; // Create the PbftSignedVotes let votes = PbftNode::signed_votes_from_messages(&msgs).into_vec(); // Verify that the votes match the original messages msgs.iter().zip(votes.iter()).for_each(|(msg, vote)| { assert_eq!(msg.message_bytes, vote.message_bytes); assert_eq!(msg.header_bytes, vote.header_bytes); assert_eq!(msg.header_signature, vote.header_signature); }); } /// In order to verify that a consensus seal or a `NewView` is correct, nodes must be able to /// verify each of the signed votes that are contained in the seal/`NewView`. `PbftSignedVote`s /// are verified by the `PbftNode::verify_vote` method, which takes as arguments the vote /// itself, the expected vote type, and a closure that evaluates some arbitrary criteria. The /// `verify_vote` method should make sure the vote’s type matches the expected type, the header /// is properly signed, the header’s signer matches the message’s signer, the message hash is /// correct, and that it meets the criteria specified in the closure. /// /// This test verifies that the `verify_vote` method works correctly by testing all cases where /// it should fail and a case where it should succeed. 
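    ///
    /// For reference, the call shape exercised by this test looks roughly like the following
    /// (the extra criteria closure and `expected_block_id` are hypothetical):
    ///
    /// ```ignore
    /// let signer_id = PbftNode::verify_vote(&vote, PbftMessageType::Commit, |msg| {
    ///     // Arbitrary extra validation, e.g. requiring the vote to target a specific block
    ///     if msg.block_id != expected_block_id {
    ///         return Err(PbftError::InvalidMessage("Vote is for the wrong block".into()));
    ///     }
    ///     Ok(())
    /// })?;
    /// ```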
#[test] fn test_vote_verification() { // Generate a public/private key pair let key_pair = mock_signer_network(1).remove(0); // Create a validly-signed Commit vote let valid_vote = mock_vote(PbftMessageType::Commit, 0, 1, vec![1], &key_pair); // Test verification of a valid vote assert_eq!( key_pair.pub_key, PbftNode::verify_vote(&valid_vote, PbftMessageType::Commit, |_| Ok(())) .expect("Valid vote was determined to be invalid") ); // Test verification of a vote with invalid type assert!( PbftNode::verify_vote(&valid_vote, PbftMessageType::ViewChange, |_| Ok(())).is_err() ); // Test verification of a vote that doesn't meet the validation_criteria assert!( PbftNode::verify_vote(&valid_vote, PbftMessageType::Commit, |_| Err( PbftError::InvalidMessage("".into()) )) .is_err() ); // Test verification of a vote with an invalid header signature let mut invalid_header_sig = valid_vote.clone(); invalid_header_sig.set_header_signature(vec![0]); assert!(PbftNode::verify_vote( &invalid_header_sig, PbftMessageType::ViewChange, |_| Ok(()) ) .is_err()); // Test verification of a vote with an invalid message hash let mut invalid_msg_hash = valid_vote.clone(); invalid_msg_hash.set_message_bytes(vec![0]); assert!( PbftNode::verify_vote(&invalid_msg_hash, PbftMessageType::Commit, |_| Ok(())).is_err() ); // Test verification of a vote where the header's signer doesn't match the message's // signers (the vote signer didn't create the message) let bad_key_pair = mock_signer_network(1).remove(0); let other_nodes_message = mock_msg( PbftMessageType::Commit, 0, 1, key_pair.pub_key.clone(), vec![1], false, ); let mut header = ConsensusPeerMessageHeader::new(); header.set_signer_id(bad_key_pair.pub_key.clone()); header.set_content_sha512(hash_sha512(&other_nodes_message.message_bytes)); let header_bytes = header .write_to_bytes() .expect("Failed to write header to bytes"); let header_signature = hex::decode( create_context("secp256k1") .expect("Failed to create context") .sign( &header_bytes, &Secp256k1PrivateKey::from_hex(&hex::encode(bad_key_pair.priv_key.clone())) .expect("Failed to create private key from hex"), ) .expect("Failed to sign header"), ) .expect("Failed to decode signed header"); let mut mismatched_signer = PbftSignedVote::new(); mismatched_signer.set_header_bytes(header_bytes); mismatched_signer.set_header_signature(header_signature); mismatched_signer.set_message_bytes(other_nodes_message.message_bytes.clone()); assert!( PbftNode::verify_vote(&mismatched_signer, PbftMessageType::Commit, |_| Ok(())).is_err() ); } /// Nodes must be able to verify `NewView` messages to ensure that view changes are valid. To /// do this, nodes use the `PbftNode::verify_new_view` method. A `NewView` message is valid if: /// /// 1. It is for a future view (should never view change backwards) /// 2. It is from the primary for the targeted view /// 3. All of the votes are valid `ViewChange` messages as determined by the `verify_vote` /// method and the criteria that each vote’s view must match the `NewView` message’s view /// 4. All of the vote’s are from nodes that are members of the network /// 5. None of the votes are from the new primary that sent this `NewView` message (the /// `NewView` message is an implicit vote from the new primary, so including its own vote /// would be double-voting) /// 6. 
There are `2f` votes (again, this is really `2f + 1` since the `NewView` message itself /// is an implicit vote) /// /// This test ensures that the `verify_new_view` method properly checks the validity of /// `NewView` messages by checking cases where it should fail and a case where it should /// succeed. #[test] fn test_new_view_verification() { // Create signing keys for a new network and instantiate a new node on the network let key_pairs = mock_signer_network(4); let (mut node, mut state, _) = mock_node( &mock_config_from_signer_network(&key_pairs), key_pairs[0].pub_key.clone(), mock_block(0), ); // Test verification of a valid NewView let valid_msg = mock_new_view( 1, 1, &key_pairs[1], (2..4) .map(|i| mock_vote(PbftMessageType::ViewChange, 1, 1, vec![], &key_pairs[i])) .collect::<Vec<_>>(), ); assert!(node.verify_new_view(&valid_msg, &mut state).is_ok()); // Test verification of a NewView from a previous view let previous_view = mock_new_view( 0, 1, &key_pairs[1], (2..4) .map(|i| mock_vote(PbftMessageType::ViewChange, 0, 1, vec![], &key_pairs[i])) .collect::<Vec<_>>(), ); assert!(node.verify_new_view(&previous_view, &mut state).is_err()); // Test verification of a NewView that is not from the primary for that view let not_from_primary = mock_new_view( 1, 1, &key_pairs[0], (2..4) .map(|i| mock_vote(PbftMessageType::ViewChange, 1, 1, vec![], &key_pairs[i])) .collect::<Vec<_>>(), ); assert!(node.verify_new_view(&not_from_primary, &mut state).is_err()); // Test verification of a NewView where one of the votes isn't a ViewChange let non_view_change_vote = mock_new_view( 1, 1, &key_pairs[1], vec![ mock_vote(PbftMessageType::ViewChange, 1, 1, vec![], &key_pairs[2]), mock_vote(PbftMessageType::Commit, 1, 1, vec![], &key_pairs[3]), ], ); assert!(node .verify_new_view(&non_view_change_vote, &mut state) .is_err()); // Test verification of a NewView that contains a ViewChange vote for a different view let vote_for_different_view = mock_new_view( 1, 1, &key_pairs[1], vec![ mock_vote(PbftMessageType::ViewChange, 1, 1, vec![], &key_pairs[2]), mock_vote(PbftMessageType::ViewChange, 0, 1, vec![], &key_pairs[3]), ], ); assert!(node .verify_new_view(&vote_for_different_view, &mut state) .is_err()); // Test verification of a NewView that contains a vote from a non-member let vote_from_unknown_peer = mock_new_view( 1, 1, &key_pairs[1], vec![ mock_vote(PbftMessageType::ViewChange, 1, 1, vec![], &key_pairs[2]), mock_vote( PbftMessageType::ViewChange, 1, 1, vec![], &mock_signer_network(1).remove(0), ), ], ); assert!(node .verify_new_view(&vote_from_unknown_peer, &mut state) .is_err()); // Test verification of a NewView that contains a vote from the new primary let vote_from_primary = mock_new_view( 1, 1, &key_pairs[1], (1..3) .map(|i| mock_vote(PbftMessageType::ViewChange, 1, 1, vec![], &key_pairs[i])) .collect::<Vec<_>>(), ); assert!(node .verify_new_view(&vote_from_primary, &mut state) .is_err()); // Test verification of a NewView that does not contain enough votes let insufficient_votes = mock_new_view( 1, 1, &key_pairs[1], vec![mock_vote( PbftMessageType::ViewChange, 1, 1, vec![], &key_pairs[2], )], ); assert!(node .verify_new_view(&insufficient_votes, &mut state) .is_err()); } /// Nodes must be able to verify consensus seals to ensure that committed blocks contain valid /// seals for future verification and to perform catch-up. To do this, nodes use the /// `PbftNode::verify_consensus_seal` method. A consensus seal is valid if: /// /// 1. 
All of the votes are valid Commit messages as determined by the `verify_vote` method and /// the criteria that each vote’s: /// a. Block ID must match the consensus seal’s block ID /// b. View must match the consensus seal’s view /// c. Sequence number must match the consensus seal’s sequence number /// 2. The seal’s signer (as determined by the seal’s `signer_id`) and all of the vote’s /// signers are nodes that were members of the network at the time the block was voted on /// (this is checked by getting the on-chain list of members for the block previous to the /// one the seal verifies, as specified in the `previous_id` argument to the /// `verify_consensus_seal` method) /// 3. None of the votes are from the seal’s signer (producing a seal is an implicit vote from /// the node that constructed it, so including its own vote would be double-voting) /// 4. There are `2f` votes (this is really `2f + 1` voters since the consensus seal itself is /// an implicit vote) /// /// This test ensures that the `verify_consensus_seal` method properly checks the validity of /// consensus seals by checking cases where it should fail and a case where it should succeed. #[test] fn test_consensus_seal_verification() { // Create signing keys for a new network and instantiate a new node on the network let key_pairs = mock_signer_network(4); let (mut node, mut state, service) = mock_node( &mock_config_from_signer_network(&key_pairs), key_pairs[0].pub_key.clone(), mock_block(0), ); // Set the MockService to return a different members list for block_id=[1] let mut block_1_settings = HashMap::new(); block_1_settings.insert( "sawtooth.consensus.pbft.members".to_string(), serde_json::to_string( &vec![ key_pairs[0].pub_key.clone(), key_pairs[2].pub_key.clone(), key_pairs[3].pub_key.clone(), mock_signer_network(1).remove(0).pub_key, ] .iter() .map(hex::encode) .collect::<Vec<_>>(), ) .unwrap(), ); service .settings .borrow_mut() .insert(vec![1], block_1_settings); // Test verification of a valid seal/previous_id let valid_seal = mock_seal( 0, 1, vec![1], &key_pairs[0], (1..3) .map(|i| mock_vote(PbftMessageType::Commit, 0, 1, vec![1], &key_pairs[i])) .collect::<Vec<_>>(), ); assert!(node .verify_consensus_seal(&valid_seal, vec![0], &mut state) .is_ok()); // Test verification of a valid seal that has a vote from a node not in the previous block // (using previous_id=[1] gets the list of members set above) let vote_not_in_prev_block = mock_seal( 0, 2, vec![2], &key_pairs[0], (1..3) .map(|i| mock_vote(PbftMessageType::Commit, 0, 2, vec![2], &key_pairs[i])) .collect::<Vec<_>>(), ); assert!(node .verify_consensus_seal(&vote_not_in_prev_block, vec![1], &mut state) .is_err()); // Test verification of a seal that contains a vote that is not a Commit let vote_not_commit = mock_seal( 0, 1, vec![1], &key_pairs[0], vec![ mock_vote(PbftMessageType::Commit, 0, 1, vec![1], &key_pairs[1]), mock_vote(PbftMessageType::ViewChange, 0, 1, vec![1], &key_pairs[2]), ], ); assert!(node .verify_consensus_seal(&vote_not_commit, vec![0], &mut state) .is_err()); // Test verification of a seal that contains a vote for a different block let vote_different_block = mock_seal( 0, 1, vec![1], &key_pairs[0], vec![ mock_vote(PbftMessageType::Commit, 0, 1, vec![1], &key_pairs[1]), mock_vote(PbftMessageType::Commit, 0, 1, vec![2], &key_pairs[2]), ], ); assert!(node .verify_consensus_seal(&vote_different_block, vec![0], &mut state) .is_err()); // Test verification of a seal that contains a vote from a different view let vote_different_view = mock_seal( 0, 
1, vec![1], &key_pairs[0], vec![ mock_vote(PbftMessageType::Commit, 0, 1, vec![1], &key_pairs[1]), mock_vote(PbftMessageType::Commit, 1, 1, vec![1], &key_pairs[2]), ], ); assert!(node .verify_consensus_seal(&vote_different_view, vec![0], &mut state) .is_err()); // Test verification of a seal that contains a vote from a different sequence number let vote_different_seq_num = mock_seal( 0, 1, vec![1], &key_pairs[0], vec![ mock_vote(PbftMessageType::Commit, 0, 1, vec![1], &key_pairs[1]), mock_vote(PbftMessageType::Commit, 0, 2, vec![1], &key_pairs[2]), ], ); assert!(node .verify_consensus_seal(&vote_different_seq_num, vec![0], &mut state) .is_err()); // Test verification of a seal that contains a vote from the seal's signer let vote_from_signer = mock_seal( 0, 1, vec![1], &key_pairs[0], (0..2) .map(|i| mock_vote(PbftMessageType::Commit, 0, 2, vec![2], &key_pairs[i])) .collect::<Vec<_>>(), ); assert!(node .verify_consensus_seal(&vote_from_signer, vec![0], &mut state) .is_err()); // Test verification of a seal that doesn't contain enough votes let not_enough_votes = mock_seal( 0, 1, vec![1], &key_pairs[0], vec![mock_vote( PbftMessageType::Commit, 0, 1, vec![1], &key_pairs[1], )], ); assert!(node .verify_consensus_seal(&not_enough_votes, vec![0], &mut state) .is_err()); } /// Nodes must be able to extract a consensus seal from a block to verify it for two purposes: /// /// 1. Ensure that the seal is valid so that it can be used to verify the previous block’s /// commit at a later point /// 2. Use the seal to commit the block using the catch-up procedure if the node has fallen /// behind /// /// A consensus seal is stored as a bytes-encoded `PbftSeal` in the block’s payload field. /// Blocks 0 and 1 do not store consensus seals, since block 0 doesn’t have a previous block /// and it is not voted on by consensus (so block 1 won’t have a seal for it). /// /// The consensus seal stored in a block is extracted, verified, and returned by the /// `PbftNode::verify_consensus_seal_from_block` method. A block’s consensus seal is deemed /// valid if: /// /// 1. There is actually a parsable consensus seal in the block’s payload field /// 2. The seal’s block ID is the same as the block’s previous ID (since the seal should be for /// the block previous to this one) /// 3. The seal itself is valid as determined by the `verify_consensus_seal` method, with the /// `previous_id` of the current block’s previous block (the one validated by the seal) used /// as the `previous_id` argument to `verify_consensus_seal`. /// /// This test ensures that the `verify_consensus_seal_from_block` method properly checks the /// validity of blocks’ consensus seals by checking cases where it should fail and a case where /// it should succeed. 
#[test] fn test_consensus_seal_from_block_verification() { // Create signing keys for a new network and instantiate a new node on the network let key_pairs = mock_signer_network(4); let (mut node, mut state, _) = mock_node( &mock_config_from_signer_network(&key_pairs), key_pairs[0].pub_key.clone(), mock_block(0), ); // Verify that block 1 need not have a seal let block1 = mock_block(1); assert!(node .verify_consensus_seal_from_block(&block1, &mut state) .is_ok()); // Add block 1 to the node's log so it can be used to verify the seal for block 2 node.msg_log.add_validated_block(block1); // Test verification of a block with an empty payload let mut block2 = mock_block(2); assert!(node .verify_consensus_seal_from_block(&block2, &mut state) .is_err()); // Test verification of a block whose seal doesn't match the block's previous ID (previous // ID of block 2 is [1]) block2.payload = mock_seal( 0, 1, vec![0], &key_pairs[0], (1..3) .map(|i| mock_vote(PbftMessageType::Commit, 0, 1, vec![0], &key_pairs[i])) .collect::<Vec<_>>(), ) .write_to_bytes() .expect("Failed to write seal to bytes"); assert!(node .verify_consensus_seal_from_block(&block2, &mut state) .is_err()); // Test verification of a block whose seal isn't valid (e.g. doesn't have enough votes) block2.payload = mock_seal( 0, 1, vec![1], &key_pairs[0], vec![mock_vote( PbftMessageType::Commit, 0, 1, vec![1], &key_pairs[1], )], ) .write_to_bytes() .expect("Failed to write seal to bytes"); assert!(node .verify_consensus_seal_from_block(&block2, &mut state) .is_err()); // Test verification of a block with a valid seal, and make sure the returned seal is the // same as the original let valid_seal = mock_seal( 0, 1, vec![1], &key_pairs[0], (1..3) .map(|i| mock_vote(PbftMessageType::Commit, 0, 1, vec![1], &key_pairs[i])) .collect::<Vec<_>>(), ); block2.payload = valid_seal .write_to_bytes() .expect("Failed to write seal to bytes"); assert_eq!( valid_seal, node.verify_consensus_seal_from_block(&block2, &mut state) .expect("Result should be valid") ); } /// To publish a valid block with a verifiable proof for the commit of the previous block, /// nodes must be able to build a valid consensus seal for the last block that the node /// committed. To build the seal, the node must have in its log: /// /// 1. The previously committed block, which has `block_num = state.seq_num - 1` /// 2. `2f + 1` matching Commit messages for the previously committed block (same type, seq_num, /// view, and block_id) that are from different nodes (different signer_id’s) /// /// While the `2f + 1` messages must all have a matching view, they could be from any past view /// since the block could have been committed in any past view. /// /// Consensus seals are built using the `PbftNode::build_seal` method, which checks its log for /// `2f` matching Commit messages for the last committed block that are from other nodes /// (doesn’t include own vote, since the seal itself is an implicit vote) and also retrieves /// the view the block was committed in. /// /// This test verifies that the `build_seal` method properly produces a consensus seal when it /// should, and that it does not produce a seal when it is unable to.
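///
/// The vote threshold itself is simple arithmetic; a self-contained sketch (the formula for
/// `f` is the standard PBFT bound, not code from this module):
///
/// ```ignore
/// fn votes_needed_for_seal(members: usize) -> usize {
///     let f = (members - 1) / 3; // maximum faulty nodes tolerated
///     2 * f                      // the seal signer's own vote is implicit
/// }
/// assert_eq!(votes_needed_for_seal(4), 2); // matches the 4-node network used below
/// ```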
#[test] fn test_consensus_seal_building() { // Create signing keys for a new network and instantiate a new node on the network let key_pairs = mock_signer_network(4); let (mut node, mut state, _) = mock_node( &mock_config_from_signer_network(&key_pairs), key_pairs[0].pub_key.clone(), mock_block(1), ); // Add a group of messages with signed components to the node's log node.msg_log.add_message(mock_msg( PbftMessageType::Commit, 0, 1, key_pairs[0].pub_key.clone(), vec![1], true, )); node.msg_log.add_message( ParsedMessage::from_signed_vote(&mock_vote( PbftMessageType::Commit, 0, 1, vec![1], &key_pairs[1], )) .expect("Failed to parse vote"), ); node.msg_log.add_message( ParsedMessage::from_signed_vote(&mock_vote( PbftMessageType::Commit, 0, 1, vec![2], &key_pairs[2], )) .expect("Failed to parse vote"), ); node.msg_log.add_message( ParsedMessage::from_signed_vote(&mock_vote( PbftMessageType::Commit, 1, 1, vec![1], &key_pairs[2], )) .expect("Failed to parse vote"), ); node.msg_log.add_message( ParsedMessage::from_signed_vote(&mock_vote( PbftMessageType::Commit, 0, 2, vec![1], &key_pairs[2], )) .expect("Failed to parse vote"), ); node.msg_log.add_message( ParsedMessage::from_signed_vote(&mock_vote( PbftMessageType::Prepare, 0, 1, vec![1], &key_pairs[2], )) .expect("Failed to parse vote"), ); // Verify that seal cannot be built yet (have 2f matching messages for a block at the last // seq_num from different signers, but one is the seal signer's own) assert!(node.build_seal(&mut state).is_err()); // Add another Commit message so there are 2f matching messages from other nodes node.msg_log.add_message( ParsedMessage::from_signed_vote(&mock_vote( PbftMessageType::Commit, 0, 1, vec![1], &key_pairs[2], )) .expect("Failed to parse vote"), ); // Verify that a valid seal can be built now let seal1 = node .build_seal(&mut state) .expect("Seal building shouldn't fail"); assert!(node .verify_consensus_seal(&seal1, vec![0], &mut state) .is_ok()); // Set the node's view to 2 and verify that a valid seal can still be built when the Commit // messages are from a past view state.view = 2; let seal2 = node .build_seal(&mut state) .expect("Seal building shouldn't fail"); assert!(node .verify_consensus_seal(&seal2, vec![0], &mut state) .is_ok()); } /// The `PbftNode::try_publish` method, which is called at every iteration of the engine’s main /// loop, is responsible for determining when a node should finalize a block that it is /// building. A node will finalize a block when: /// /// 1. It is the leader /// 2. It is in the PrePreparing phase /// 3. A block has been initialized (calls to `summarize_block` will fail if no block is /// initialized) /// 4. The block can be summarized successfully (this means the block is ready to be finalized /// from the validator’s perspective) /// /// The block must be finalized with a valid consensus seal for the previous block in order for /// the other nodes to accept it, since the consensus seal is necessary to verify that the /// previous block was committed properly. /// /// This test verifies that the `try_publish` method properly determines when the in-progress /// block should be finalized. 
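///
/// A condensed sketch of that gating logic (`is_primary` and the exact call shapes are
/// assumptions inferred from the assertions below):
///
/// ```ignore
/// if state.is_primary() && state.phase == PbftPhase::PrePreparing {
///     if service.summarize_block().is_ok() {
///         let seal_bytes = self.build_seal(state)?.write_to_bytes()?;
///         service.finalize_block(seal_bytes)?;
///     }
/// }
/// ```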
#[test] #[allow(unused_must_use)] fn test_publishing() { // Create a new node on a 4 node network let (mut node, mut state, service) = mock_node(&mock_config(4), vec![0], mock_block(1)); // Add messages necessary to build a valid seal for block 1 node.msg_log.add_message(mock_msg( PbftMessageType::Commit, 0, 1, vec![0], vec![1], true, )); node.msg_log.add_message(mock_msg( PbftMessageType::Commit, 0, 1, vec![1], vec![1], false, )); node.msg_log.add_message(mock_msg( PbftMessageType::Commit, 0, 1, vec![2], vec![1], false, )); // Update the state to view 1 (so node isn’t primary) and call try_publish(); verify that // finalize_block() was not called state.view = 1; assert!(node.try_publish(&mut state).is_ok()); assert!(!service.was_called("finalize_block")); // Reset the state’s view to 0 and update its phase to Preparing, then call try_publish(); // verify that finalize_block() was not called state.view = 0; state.phase = PbftPhase::Preparing; assert!(node.try_publish(&mut state).is_ok()); assert!(!service.was_called("finalize_block")); // Reset the state’s phase to PrePreparing and update the mock Service so its // summarize_block() method returns an Err result; call try_publish() and verify that // finalize_block() was not called state.phase = PbftPhase::PrePreparing; service .summarize_block_return_val .replace(Err(Error::BlockNotReady)); assert!(node.try_publish(&mut state).is_ok()); assert!(service.was_called("summarize_block")); assert!(!service.was_called("finalize_block")); // Update the mock Service so its summarize_block() method returns Ok again, then call // try_publish() and verify that finalize_block() is called with a seal for block 1 service .summarize_block_return_val .replace(Ok(Default::default())); assert!(node.try_publish(&mut state).is_ok()); assert!(service.was_called_with_args(stringify_func_call!( "finalize_block", node.build_seal(&mut state) .expect("Failed to build seal") .write_to_bytes() .expect("Failed to write seal to bytes") ))); } /// As a consensus engine, PBFT must make sure that every block it receives has certain /// characteristics to be considered valid: /// /// 1. The block must not be older than the chain head (since PBFT is non-forking and final, it /// will never go back and commit an old block) /// 2. The node must already have the previous block, since it can’t verify the block’s /// consensus seal without it /// 3. The block’s previous block must have the previous block number (block number must be /// strictly monotonically increasing by one) /// 4. The block's grandparent (its previous block's previous block) must already be committed /// before the block can be considered. /// 5. The block’s consensus seal must be valid as determined by the /// `PbftNode::verify_consensus_seal_from_block` method, since any block that gets committed /// to the chain must contain a valid proof for the block before it (which is required to /// uphold finality, provide external verification, and enable the catch-up procedure) /// /// Criteria (1-3) are checked immediately when the block is received; if the block does not /// meet any of these criteria, it should be failed. Otherwise, if it passes this step, it /// should be added to the log as an unvalidated block and its validity should be checked using /// the service. If a BlockValid update is received by the node, it should mark the block as /// validated, then check criterion (4).
If criterion (4) is not met, the block is not failed; /// instead, the node should simply skip further handling of the block until the block's /// grandparent is committed, at which point the node will evaluate criterion (5) (this will /// happen in the call to on_block_commit for the grandparent). Once criterion (4) is met, the /// node will check criterion (5); if this criterion is not met, the block should be failed. /// /// This test ensures that these criteria are enforced when a block is received using the /// `PbftNode::on_block_new` method. #[test] #[allow(unused_must_use)] fn test_block_acceptance_and_validation() { // Create signing keys for a new network and instantiate a new node on the network with // block 3 as the chain head let key_pairs = mock_signer_network(4); let (mut node, mut state, service) = mock_node( &mock_config_from_signer_network(&key_pairs), key_pairs[0].pub_key.clone(), mock_block(3), ); // Verify old blocks are rejected immediately when they are received let mut old_block = mock_block(2); old_block.payload = mock_seal( 0, 1, vec![1], &key_pairs[0], (1..3) .map(|i| mock_vote(PbftMessageType::Commit, 0, 1, vec![1], &key_pairs[i])) .collect::<Vec<_>>(), ) .write_to_bytes() .expect("Failed to write seal to bytes"); node.on_block_new(old_block, &mut state); assert!(service.was_called_with_args(stringify_func_call!("fail_block", vec![2]))); assert!(node.msg_log.block_validated(vec![2]).is_none()); assert!(node.msg_log.get_block_with_id(&[2]).is_none()); // Verify blocks are rejected immediately when node doesn't have previous block let mut no_previous_block = mock_block(5); no_previous_block.payload = mock_seal( 0, 4, vec![4], &key_pairs[0], (1..3) .map(|i| mock_vote(PbftMessageType::Commit, 0, 4, vec![4], &key_pairs[i])) .collect::<Vec<_>>(), ) .write_to_bytes() .expect("Failed to write seal to bytes"); node.on_block_new(no_previous_block.clone(), &mut state); assert!(service.was_called_with_args(stringify_func_call!("fail_block", vec![5]))); assert!(node.msg_log.block_validated(vec![5]).is_none()); assert!(node.msg_log.get_block_with_id(&[5]).is_none()); // Verify blocks are rejected immediately when the previous block doesn't have the previous // block num let mut previous_block_not_previous_num = mock_block(5); previous_block_not_previous_num.previous_id = vec![3]; previous_block_not_previous_num.payload = mock_seal( 0, 3, vec![3], &key_pairs[0], (1..3) .map(|i| mock_vote(PbftMessageType::Commit, 0, 3, vec![3], &key_pairs[i])) .collect::<Vec<_>>(), ) .write_to_bytes() .expect("Failed to write seal to bytes"); node.on_block_new(previous_block_not_previous_num.clone(), &mut state); // called more than once now assert!(!service.was_called_with_args_once(stringify_func_call!("fail_block", vec![5]))); assert!(node.msg_log.block_validated(vec![5]).is_none()); assert!(node.msg_log.get_block_with_id(&[5]).is_none()); // Verify blocks aren't handled before the grandparent block is committed (this block is // actually invalid because of its seal, but it won't be failed because it can't properly // be verified yet) node.msg_log.add_validated_block(mock_block(5)); let mut invalid_block_but_not_ready = mock_block(6); invalid_block_but_not_ready.payload = mock_seal( 0, 5, vec![5], &key_pairs[0], vec![mock_vote( PbftMessageType::Commit, 0, 5, vec![5], &key_pairs[1], )], ) .write_to_bytes() .expect("Failed to write seal to bytes"); node.on_block_new(invalid_block_but_not_ready.clone(), &mut state); assert!(service.was_called_with_args(stringify_func_call!("check_blocks",
vec![vec![6]]))); node.on_block_valid(invalid_block_but_not_ready.block_id.clone(), &mut state); assert!(!service.was_called_with_args(stringify_func_call!("fail_block", vec![6]))); assert!(node.msg_log.block_validated(vec![6]).is_none()); assert!(node.msg_log.get_block_with_id(&[6]).is_some()); // Verify blocks with invalid seals (e.g. not enough votes) are rejected after the block is // validated by the validator let mut invalid_seal = mock_block(4); invalid_seal.payload = mock_seal( 0, 3, vec![3], &key_pairs[0], vec![mock_vote( PbftMessageType::Commit, 0, 3, vec![3], &key_pairs[1], )], ) .write_to_bytes() .expect("Failed to write seal to bytes"); node.on_block_new(invalid_seal.clone(), &mut state); assert!(service.was_called_with_args(stringify_func_call!("check_blocks", vec![vec![4]]))); node.on_block_valid(invalid_seal.block_id.clone(), &mut state); assert!(service.was_called_with_args(stringify_func_call!("fail_block", vec![4]))); assert!(node.msg_log.block_validated(vec![4]).is_none()); assert!(node.msg_log.get_block_with_id(&[4]).is_some()); // Verify valid blocks are accepted and added to the log let mut valid_block = mock_block(4); valid_block.payload = mock_seal( 0, 3, vec![3], &key_pairs[0], (1..3) .map(|i| mock_vote(PbftMessageType::Commit, 0, 3, vec![3], &key_pairs[i])) .collect::<Vec<_>>(), ) .write_to_bytes() .expect("Failed to write seal to bytes"); node.on_block_new(valid_block.clone(), &mut state); // called more than once now assert!( !service.was_called_with_args_once(stringify_func_call!("check_blocks", vec![vec![4]])) ); node.on_block_valid(valid_block.block_id.clone(), &mut state); // shouldn't have called fail_block again assert!(service.was_called_with_args_once(stringify_func_call!("fail_block", vec![4]))); assert!(node.msg_log.get_block_with_id(&[4]).is_some()); } /// After receiving a block and checking it using the service, the consensus engine may be /// notified that the block is actually invalid. In this case, PBFT should drop the block from /// its log and fail the block. #[test] fn test_invalid_block() { let (mut node, mut state, service) = mock_node(&mock_config(4), vec![0], mock_block(0)); // Get a BlockNew and a BlockInvalid assert!(node.on_block_new(mock_block(1), &mut state).is_ok()); assert!(node.on_block_invalid(vec![1]).is_ok()); // Verify that the block is no longer in the log and it has been failed assert!(node.msg_log.block_validated(vec![1]).is_none()); assert!(node.msg_log.get_block_with_id(vec![1].as_slice()).is_none()); assert!(service.was_called_with_args(stringify_func_call!("fail_block", vec![1]))); } /// After a primary creates and publishes a block to the network, it needs to send out a /// PrePrepare message to endorse that block as the one for the network to perform consensus on /// for that sequence number. /// /// This action should be performed only by the primary, because only the primary’s PrePrepare /// will be accepted by the other nodes in the network. Also, the primary should only broadcast /// a PrePrepare message for a block that it created itself; this protects the network from /// malicious, non-primary nodes that attempt to create a block and have the legitimate primary /// unwittingly broadcast a PrePrepare for it.
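///
/// In other words, the broadcast is guarded roughly like this (a sketch; `is_primary` and the
/// identity comparison are assumptions inferred from the assertions below):
///
/// ```ignore
/// if state.is_primary() && block.signer_id == state.id {
///     // Broadcast a PrePrepare for (state.view, state.seq_num, block.block_id)
///     self.broadcast_pbft_message(/* ... */)?;
/// }
/// ```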
#[test] #[allow(unused_must_use)] fn test_pre_prepare_broadcasting() { // Create a primary node let (mut node, mut state, service) = mock_node(&mock_config(4), vec![0], mock_block(0)); // Create a block from a different node and pass it to the primary node; verify that the // primary doesn't broadcast a PrePrepare for the block let mut different_signer = mock_block(1); different_signer.signer_id = vec![1]; node.on_block_new(different_signer.clone(), &mut state); node.on_block_valid(different_signer.block_id, &mut state); assert!(!service.was_called("broadcast")); // Update the node's view to 1 so it is no longer the primary, and pass a block to it that // it created; verify that the node doesn't broadcast a PrePrepare for the block state.view = 1; let mut own_block = mock_block(1); own_block.signer_id = vec![0]; node.on_block_new(own_block.clone(), &mut state); node.on_block_valid(own_block.block_id.clone(), &mut state); assert!(!service.was_called("broadcast")); // Reset the node's view to 0 so it is primary again and pass its own block to it again; // verify that the mock Service’s broadcast method was called with a PrePrepare message for // the block at the current view and sequence number state.view = 0; node.on_block_new(own_block.clone(), &mut state); node.on_block_valid(own_block.block_id.clone(), &mut state); assert!(service.was_called_with_args(stringify_func_call!( "broadcast", "PrePrepare", mock_msg(PbftMessageType::PrePrepare, 0, 1, vec![0], vec![1], false).message_bytes ))); } /// Part of validating all PBFT messages is ensuring each message actually originates from the /// node that signed. If this is not verified, a malicious node could “spoof” other nodes’ /// messages and send duplicate votes that seem to be different. /// /// To make the task of verifying the origin of messages easier, the validator verifies the /// signature of each PeerMessage that it sends to the consensus engine. Each PBFT message has /// a `signer_id` field that is not verified by the validator, but can be compared with the /// `signer_id` of the PeerMessage to conclusively determine if the node that created the PBFT /// message is the same as the node that signed that message. /// /// This verification is performed by the `handle_update` method in `engine.rs`; its /// functionality will be tested by supplying a `PeerMessage` where the `signer_id` matches the /// contained message’s `signer_id`, as well as supplying a `PeerMessage` where the `signer_id` /// does not match the contained message’s `signer_id`. 
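///
/// A minimal sketch of that comparison (the error value is a placeholder, not the engine's
/// actual variant):
///
/// ```ignore
/// let pbft_message: PbftMessage = protobuf::parse_from_bytes(&peer_message.content)?;
/// if pbft_message.get_info().get_signer_id() != peer_message.header.signer_id.as_slice() {
///     return Err(/* PeerMessage signer does not match PBFT message signer */);
/// }
/// ```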
#[test] fn test_message_signing() { let (mut node, mut state, _) = mock_node(&mock_config(4), vec![0], mock_block(0)); // Call handle_update() with a PeerMessage that has a different signer_id than the PBFT // message it contains and verify that the result is Err let mut invalid_peer_message = PeerMessage::default(); invalid_peer_message.header.signer_id = vec![2]; invalid_peer_message.header.message_type = "PrePrepare".into(); invalid_peer_message.content = mock_msg(PbftMessageType::PrePrepare, 0, 1, vec![1], vec![1], false).message_bytes; assert!(test_handle_update( &mut node, Ok(Update::PeerMessage(invalid_peer_message, vec![2])), &mut state ) .is_err()); // Call handle_update() with a PeerMessage that has the same signer_id as the PBFT message // it contains and verify that the result is Ok let mut valid_peer_message = PeerMessage::default(); valid_peer_message.header.signer_id = vec![1]; valid_peer_message.header.message_type = "PrePrepare".into(); valid_peer_message.content = mock_msg(PbftMessageType::PrePrepare, 0, 1, vec![1], vec![1], false).message_bytes; assert!(test_handle_update( &mut node, Ok(Update::PeerMessage(valid_peer_message, vec![1])), &mut state ) .is_ok()); } /// A node should ignore all messages that aren’t from known members of the network, but accept /// those that are. Messages that originate from unknown nodes should not be treated as valid /// messages, since PBFT has closed membership and only a network-accepted list of members are /// allowed to participate. /// /// This test ensures that the node properly identifies messages that come from PBFT members /// and those that don’t. #[test] fn test_message_signer_membership() { // Create a new node let (mut node, mut state, _) = mock_node(&mock_config(4), vec![0], mock_block(0)); // Call the node’s on_peer_message() method with a message from a peer that’s not a member // of the network; verify that the result is an Err assert!(node .on_peer_message( mock_msg(PbftMessageType::Commit, 0, 1, vec![4], vec![1], false), &mut state ) .is_err()); // Call on_peer_message() again with a message from a peer that is a member of the network; // verify the result is Ok assert!(node .on_peer_message( mock_msg(PbftMessageType::Commit, 0, 1, vec![3], vec![1], false), &mut state ) .is_ok()); } /// The primary sends a PrePrepare message after publishing a block to endorse that block as /// the one to perform consensus on for the current sequence number. The secondary nodes will /// accept this PrePrepare message, add the message to their logs, and begin to perform /// consensus on the block (by moving to the Preparing phase) as long as the PrePrepare is /// valid. The PrePrepare is valid if: /// /// 1. It is from the primary for the node’s current view /// 2. Its view is the same as the node’s current view /// 3. There isn’t an existing PrePrepare for this sequence number and view that is for a /// different block /// /// This test ensures that all 3 of these conditions are properly checked when a PrePrepare /// message is received and passed to the `PbftNode::on_peer_message` method; the node will not /// add the message to the log if any of the conditions are violated, but will add the message /// to the log if they are all met. 
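///
/// The membership test reduces to a lookup against the on-chain members list (sketch):
///
/// ```ignore
/// fn is_member(member_ids: &[Vec<u8>], signer_id: &[u8]) -> bool {
///     member_ids.iter().any(|id| id.as_slice() == signer_id)
/// }
/// // In the 4-node mock network the member IDs are [0], [1], [2], and [3], so a
/// // message signed by [4] is rejected while one signed by [3] is accepted.
/// ```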
#[test] #[allow(unused_must_use)] fn test_pre_prepare_checking() { // Create a new node let (mut node, mut state, _) = mock_node(&mock_config(4), vec![0], mock_block(0)); // Verify PrePrepares that are not from the primary are rejected node.on_peer_message( mock_msg(PbftMessageType::PrePrepare, 0, 1, vec![1], vec![1], false), &mut state, ); assert_eq!( 0, node.msg_log .get_messages_of_type_seq(PbftMessageType::PrePrepare, 1) .len() ); // Verify PrePrepares that are not for the current view are rejected node.on_peer_message( mock_msg(PbftMessageType::PrePrepare, 1, 1, vec![0], vec![1], false), &mut state, ); assert_eq!( 0, node.msg_log .get_messages_of_type_seq(PbftMessageType::PrePrepare, 1) .len() ); // Verify that valid PrePrepares are accepted and added to the log let valid_pre_prepare = mock_msg(PbftMessageType::PrePrepare, 0, 1, vec![0], vec![1], false); node.on_peer_message(valid_pre_prepare.clone(), &mut state); { let res1 = node .msg_log .get_messages_of_type_seq(PbftMessageType::PrePrepare, 1); assert_eq!(1, res1.len()); assert_eq!(&valid_pre_prepare, res1[0]); } // Verify that another PrePrepare with a mismatched block is rejected node.on_peer_message( mock_msg(PbftMessageType::PrePrepare, 0, 1, vec![0], vec![2], false), &mut state, ); let res2 = node .msg_log .get_messages_of_type_seq(PbftMessageType::PrePrepare, 1); assert_eq!(1, res2.len()); assert_eq!(&valid_pre_prepare, res2[0]); } /// In the PrePreparing phase, the first phase of the PBFT algorithm, the primary creates and /// publishes a block, then endorses that block with a `PrePrepare` message. When a node in the /// PrePreparing phase has a valid block and a valid `PrePrepare` message for its current /// sequence number, it should: /// /// 1. Switch to the Preparing phase /// 2. Stop the idle timeout (since the primary completed its job of producing a block and /// endorsing it) /// 3. Start the commit timeout (as a backup in case something goes wrong and the network gets /// stuck; if so, the timeout will expire and a new view will be started to ensure progress /// will be made) /// 4. (Only secondary nodes) Broadcast a `Prepare` message for the primary’s endorsed block /// with the current view and sequence number to all members of the network /// /// Formally, to complete the PrePreparing phase and perform the above actions for some /// sequence number n, the following must be true of the node: /// /// 1. The node is in the PrePreparing phase (it isn’t already done with PrePreparing) /// 2. The node is on sequence number n /// 3. The node has a valid block in its log for the sequence number n /// 4. The node has a valid `PrePrepare` in its log for the block in (3) (the sequence number /// of the `PrePrepare` must match the block’s block number) /// /// (1) and (2) are closely related; the only time (2) changes (the sequence number gets /// incremented) is when a block gets committed, at which point the phase is set to /// PrePreparing (because a block was committed, the node restarts at the beginning phase). /// Thus, there are really 3 events that must happen for PrePreparing to be complete: /// /// 1. The node committed a block for sequence number n - 1, so it is now PrePreparing for /// sequence number n /// 2. A valid block for sequence number n is received and added to the log /// 3. 
A valid `PrePrepare` for the block in (2) is received and added to the log /// /// Typically, these 3 events will happen in order, but this is not always the case; it is /// possible, for instance, for a node to receive a block and `PrePrepare` for sequence number /// n before block n - 1 is committed. /// /// There is also an additional check of the `PrePrepare` that is necessary for the /// PrePreparing phase to be complete: the `PrePrepare`’s sequence number must be checked to /// verify that it matches the block’s block number. This is required to enforce a one-to-one /// correlation between a block’s number and sequence number at which the block is committed. /// This check must be done here instead of when the `PrePrepare` is received, because the node /// may not yet have the block in question when the `PrePrepare` is received. /// /// This test verifies that the node completes the PrePreparing phase and performs the proper /// actions iff the required conditions are true, that these required conditions can be met in /// any order, and that the `PrePrepare`’s sequence number matches the block’s block number. #[test] fn test_pre_preparing_phase() { // Create signing keys for a new network and instantiate a new secondary node on the // network; verify that it is PrePreparing let key_pairs = mock_signer_network(4); let (mut node, mut state, service) = mock_node( &mock_config_from_signer_network(&key_pairs), key_pairs[1].pub_key.clone(), mock_block(0), ); assert_eq!(1, state.seq_num); assert_eq!(PbftPhase::PrePreparing, state.phase); // Create blocks 1-9 let mut blocks = (1..10).map(|i| { let mut block = mock_block(i); block.payload = mock_seal( 0, (i - 1).into(), vec![i - 1], &key_pairs[0], (1..3) .map(|j| { mock_vote( PbftMessageType::Commit, 0, (i - 1).into(), vec![i - 1], &key_pairs[j], ) }) .collect::<Vec<_>>(), ) .write_to_bytes() .expect("Failed to write seal to bytes"); block }); // Add block 1 so the node can receive block 2 node.msg_log.add_validated_block(blocks.next().unwrap()); // Verify order Commit -> Block -> PrePrepare // Simulate block 1 commit state.phase = PbftPhase::Finishing(false); assert!(node.on_block_commit(vec![1], &mut state).is_ok()); assert_eq!(2, state.seq_num); assert_eq!(PbftPhase::PrePreparing, state.phase); // Receive block 2 (BlockNew and BlockValid) assert!(node .on_block_new(blocks.next().unwrap(), &mut state) .is_ok()); assert!(node.on_block_valid(vec![2], &mut state).is_ok()); assert_eq!(PbftPhase::PrePreparing, state.phase); // Receive PrePrepare for block 2 assert!(node .on_peer_message( mock_msg( PbftMessageType::PrePrepare, 0, 2, key_pairs[0].pub_key.clone(), vec![2], false, ), &mut state, ) .is_ok()); // Check appropriate actions performed assert_eq!(PbftPhase::Preparing, state.phase); assert!(!state.idle_timeout.is_active()); assert!(state.commit_timeout.is_active()); assert!(service.was_called_with_args(stringify_func_call!( "broadcast", "Prepare", mock_msg( PbftMessageType::Prepare, 0, 2, key_pairs[1].pub_key.clone(), vec![2], false, ) .message_bytes ))); // Verify order Commit -> PrePrepare -> Block // Simulate block 2 commit state.phase = PbftPhase::Finishing(false); assert!(node.on_block_commit(vec![2], &mut state).is_ok()); assert_eq!(3, state.seq_num); assert_eq!(PbftPhase::PrePreparing, state.phase); // Receive PrePrepare for block 3 assert!(node .on_peer_message( mock_msg( PbftMessageType::PrePrepare, 0, 3, key_pairs[0].pub_key.clone(), vec![3], false, ), &mut state, ) .is_ok()); assert_eq!(PbftPhase::PrePreparing, state.phase); // 
Receive block 3 (BlockNew and BlockValid) assert!(node .on_block_new(blocks.next().unwrap(), &mut state) .is_ok()); assert!(node.on_block_valid(vec![3], &mut state).is_ok()); // Check appropriate actions performed assert_eq!(PbftPhase::Preparing, state.phase); assert!(!state.idle_timeout.is_active()); assert!(state.commit_timeout.is_active()); assert!(service.was_called_with_args(stringify_func_call!( "broadcast", "Prepare", mock_msg( PbftMessageType::Prepare, 0, 3, key_pairs[1].pub_key.clone(), vec![3], false, ) .message_bytes ))); // Verify order Block -> Commit -> PrePrepare // Receive block 4 (BlockNew and BlockValid; set phase to Finishing, otherwise catch-up // occurs) state.phase = PbftPhase::Finishing(false); assert!(node .on_block_new(blocks.next().unwrap(), &mut state) .is_ok()); assert!(node.on_block_valid(vec![4], &mut state).is_ok()); assert_eq!(PbftPhase::Finishing(false), state.phase); // Simulate block 3 commit assert!(node.on_block_commit(vec![3], &mut state).is_ok()); assert_eq!(4, state.seq_num); assert_eq!(PbftPhase::PrePreparing, state.phase); // Receive PrePrepare for block 4 assert!(node .on_peer_message( mock_msg( PbftMessageType::PrePrepare, 0, 4, key_pairs[0].pub_key.clone(), vec![4], false, ), &mut state, ) .is_ok()); // Check appropriate actions performed assert_eq!(PbftPhase::Preparing, state.phase); assert!(!state.idle_timeout.is_active()); assert!(state.commit_timeout.is_active()); assert!(service.was_called_with_args(stringify_func_call!( "broadcast", "Prepare", mock_msg( PbftMessageType::Prepare, 0, 4, key_pairs[1].pub_key.clone(), vec![4], false, ) .message_bytes ))); // Verify order Block -> PrePrepare -> Commit // Receive block 5 (BlockNew and BlockValid; set phase to Finishing, otherwise catch-up // occurs) state.phase = PbftPhase::Finishing(false); assert!(node .on_block_new(blocks.next().unwrap(), &mut state) .is_ok()); assert!(node.on_block_valid(vec![5], &mut state).is_ok()); assert_eq!(PbftPhase::Finishing(false), state.phase); // Receive PrePrepare for block 5 (still Finishing because block 4 has not been committed // yet) assert!(node .on_peer_message( mock_msg( PbftMessageType::PrePrepare, 0, 5, key_pairs[0].pub_key.clone(), vec![5], false, ), &mut state, ) .is_ok()); assert_eq!(PbftPhase::Finishing(false), state.phase); // Simulate block 4 commit assert!(node.on_block_commit(vec![4], &mut state).is_ok()); assert_eq!(5, state.seq_num); // Check appropriate actions performed assert_eq!(PbftPhase::Preparing, state.phase); assert!(!state.idle_timeout.is_active()); assert!(state.commit_timeout.is_active()); assert!(service.was_called_with_args(stringify_func_call!( "broadcast", "Prepare", mock_msg( PbftMessageType::Prepare, 0, 5, key_pairs[1].pub_key.clone(), vec![5], false, ) .message_bytes ))); // Verify order PrePrepare -> Commit -> Block // Receive PrePrepare for block 6 (still Preparing because block 5 has not been committed // yet) assert!(node .on_peer_message( mock_msg( PbftMessageType::PrePrepare, 0, 6, key_pairs[0].pub_key.clone(), vec![6], false, ), &mut state, ) .is_ok()); assert_eq!(PbftPhase::Preparing, state.phase); // Simulate block 5 commit state.phase = PbftPhase::Finishing(false); assert!(node.on_block_commit(vec![5], &mut state).is_ok()); assert_eq!(6, state.seq_num); // Receive block 6 (BlockNew and BlockValid) assert!(node .on_block_new(blocks.next().unwrap(), &mut state) .is_ok()); assert!(node.on_block_valid(vec![6], &mut state).is_ok()); // Check appropriate actions performed assert_eq!(PbftPhase::Preparing, state.phase); 
assert!(!state.idle_timeout.is_active()); assert!(state.commit_timeout.is_active()); assert!(service.was_called_with_args(stringify_func_call!( "broadcast", "Prepare", mock_msg( PbftMessageType::Prepare, 0, 6, key_pairs[1].pub_key.clone(), vec![6], false, ) .message_bytes ))); // Verify order PrePrepare -> Block -> Commit // Receive PrePrepare for block 7 (still Preparing because block 6 has not been committed // yet) assert!(node .on_peer_message( mock_msg( PbftMessageType::PrePrepare, 0, 7, key_pairs[0].pub_key.clone(), vec![7], false, ), &mut state, ) .is_ok()); assert_eq!(PbftPhase::Preparing, state.phase); // Receive block 7 (BlockNew and BlockValid; set phase to Finishing, otherwise catch-up // occurs) state.phase = PbftPhase::Finishing(false); assert!(node .on_block_new(blocks.next().unwrap(), &mut state) .is_ok()); assert!(node.on_block_valid(vec![7], &mut state).is_ok()); assert_eq!(PbftPhase::Finishing(false), state.phase); // Simulate block 6 commit assert!(node.on_block_commit(vec![6], &mut state).is_ok()); assert_eq!(7, state.seq_num); // Check appropriate actions performed assert_eq!(PbftPhase::Preparing, state.phase); assert!(!state.idle_timeout.is_active()); assert!(state.commit_timeout.is_active()); assert!(service.was_called_with_args(stringify_func_call!( "broadcast", "Prepare", mock_msg( PbftMessageType::Prepare, 0, 7, key_pairs[1].pub_key.clone(), vec![7], false, ) .message_bytes ))); // Verify that PrePrepare’s sequence number must match the block’s number // Receive blocks 8 and 9 (BlockNew and BlockValid) assert!(node .on_block_new(blocks.next().unwrap(), &mut state) .is_ok()); assert!(node.on_block_valid(vec![8], &mut state).is_ok()); assert!(node .on_block_new(blocks.next().unwrap(), &mut state) .is_ok()); assert!(node.on_block_valid(vec![9], &mut state).is_ok()); // Set the node to PrePreparing at seq_num 8 state.phase = PbftPhase::PrePreparing; state.seq_num = 8; // Receive PrePrepare for sequence number 8 but block 9 assert!(node .on_peer_message( mock_msg( PbftMessageType::PrePrepare, 0, 8, key_pairs[0].pub_key.clone(), vec![9], false, ), &mut state, ) .is_ok()); // Verify node is still in the PrePreparing phase (PrePrepare.seq_num != Block.block_num) assert_eq!(PbftPhase::PrePreparing, state.phase); } /// In the Preparing phase, which is the first round of consensus that the network performs on /// a block, the node will accept valid `Prepare` messages (`Prepare` messages are accepted as /// valid as long as they’re for the current view and they’re not from the current primary). /// For a node to complete the Preparing phase and move on to the Committing phase, the /// following must be true: /// /// 1. The node is in the Preparing phase /// 2. The node has a valid `PrePrepare` for the current view and sequence number /// 3. The node has `2f + 1` `Prepare` messages that match the `PrePrepare` (same view, /// sequence number, and block ID) for the node’s current sequence number, all from /// different nodes /// /// These conditions are checked when the node receives a `Prepare` message; thus, receiving a /// `Prepare` message is the trigger for checking whether to move on to the Committing phase. /// Normally condition (1) will be met before (2), but this is not always the case; sometimes /// the node will receive all the required `Prepare` messages before entering the Preparing /// phase. 
This is not a problem, though, because part of switching from the PrePreparing to /// the Preparing phase is broadcasting a `Prepare` message, which also self-sends a `Prepare` /// message, so the conditions will be checked and the node will be able to move on to the /// Committing phase. /// /// When the node has completed the `Preparing` phase, it will perform the following actions: /// /// 1. Switch to the Committing phase /// 2. Broadcast a `Commit` message to the whole network /// /// This test will verify that the node will complete the Preparing phase and perform the above /// actions iff the necessary conditions are met. #[test] #[allow(unused_must_use)] fn test_preparing_phase() { // Create a new node 1 with a 6 node config and set its phase to Preparing let (mut node, mut state, service) = mock_node(&mock_config(6), vec![1], mock_block(0)); state.phase = PbftPhase::Preparing; // Verify that invalid Prepares (from different view or from current primary) are rejected node.on_peer_message( mock_msg(PbftMessageType::Prepare, 1, 1, vec![2], vec![1], false), &mut state, ); node.on_peer_message( mock_msg(PbftMessageType::Prepare, 0, 1, vec![0], vec![1], false), &mut state, ); assert_eq!( 0, node.msg_log .get_messages_of_type_seq(PbftMessageType::Prepare, 1) .len() ); // Prepare from primary triggers a view change, so reset mode state.mode = PbftMode::Normal; // Add two valid Prepares and verify the node is still Preparing (hasn't received enough) assert!(node .on_peer_message( mock_msg(PbftMessageType::Prepare, 0, 1, vec![2], vec![1], false), &mut state, ) .is_ok()); assert!(node .on_peer_message( mock_msg(PbftMessageType::Prepare, 0, 1, vec![3], vec![1], false), &mut state, ) .is_ok()); assert_eq!(PbftPhase::Preparing, state.phase); // Verify Prepares' block IDs must match assert!(node .on_peer_message( mock_msg(PbftMessageType::Prepare, 0, 1, vec![4], vec![2], false), &mut state, ) .is_ok()); assert_eq!(PbftPhase::Preparing, state.phase); // Verify Prepares must be for current sequence number assert!(node .on_peer_message( mock_msg(PbftMessageType::Prepare, 0, 2, vec![2], vec![2], false), &mut state, ) .is_ok()); assert!(node .on_peer_message( mock_msg(PbftMessageType::Prepare, 0, 2, vec![3], vec![2], false), &mut state, ) .is_ok()); assert!(node .on_peer_message( mock_msg(PbftMessageType::Prepare, 0, 2, vec![4], vec![2], false), &mut state, ) .is_ok()); assert_eq!(PbftPhase::Preparing, state.phase); // Verify that there must be a matching PrePrepare (even after 2f + 1 Prepares) assert!(node .on_peer_message( mock_msg(PbftMessageType::Prepare, 0, 1, vec![4], vec![1], false), &mut state, ) .is_ok()); assert_eq!(PbftPhase::Preparing, state.phase); // Receive the PrePrepare and node's own Prepare; verify node is committing and has // broadcasted a valid Commit message assert!(node .on_peer_message( mock_msg(PbftMessageType::PrePrepare, 0, 1, vec![0], vec![1], false), &mut state, ) .is_ok()); assert!(node .on_peer_message( mock_msg(PbftMessageType::Prepare, 0, 1, vec![1], vec![1], true), &mut state, ) .is_ok()); assert_eq!(PbftPhase::Committing, state.phase); assert!(service.was_called_with_args(stringify_func_call!( "broadcast", "Commit", mock_msg(PbftMessageType::Commit, 0, 1, vec![1], vec![1], false).message_bytes ))); // Verify transition only happens once, Commit broadcast doesn't happen again assert!(node .on_peer_message( mock_msg(PbftMessageType::Prepare, 0, 1, vec![5], vec![1], false), &mut state, ) .is_ok()); assert!(service.was_called_with_args_once(stringify_func_call!( 
"broadcast", "Commit", mock_msg(PbftMessageType::Commit, 0, 1, vec![1], vec![1], false).message_bytes ))); } /// In the Committing phase, which is the second round of consensus that the network performs /// on a block, the node will accept valid `Commit` messages (`Commit` messages are accepted as /// valid as long as they’re for the current view). For a node to complete the Committing phase /// and commit a block, the following must be true: /// /// 1. The node is in the Committing phase /// 2. The node has a valid `PrePrepare` for the current view and sequence number /// 3. The node has `2f + 1` `Commit` messages that match the `PrePrepare` (same view, sequence /// number, and block ID) for the node’s current sequence number, all from different nodes /// /// These conditions are checked when the node receives a `Commit` message; thus, receiving a /// `Commit` message is the trigger for checking whether to commit a block and move on to the /// Finishing phase. Normally condition (1) will be met before (2), but this is not always the /// case; sometimes the node will receive all the required `Commit` messages before entering /// the Committing phase. This is not a problem, though, because part of switching from the /// Preparing to the Committing phase is broadcasting a `Commit` message, which also self-sends /// a `Commit` message, so the conditions will be checked and the node will be able to commit /// the block. /// /// When the node has completed the Committing phase, it will perform the following actions: /// /// 1. Commit the block /// 2. Switch to the Finishing phase /// 3. Stop the commit timeout /// /// This test will verify that the node will complete the Committing phase and perform the /// above actions iff the necessary conditions are met. 
#[test] #[allow(unused_must_use)] fn test_committing_phase() { // Create a new node 0 with a 5 node config; set its phase to Committing and start its // commit timeout let (mut node, mut state, service) = mock_node(&mock_config(5), vec![0], mock_block(0)); state.phase = PbftPhase::Committing; state.commit_timeout.start(); // Verify that Commits from a different view are rejected node.on_peer_message( mock_msg(PbftMessageType::Commit, 1, 1, vec![1], vec![1], false), &mut state, ); assert_eq!( 0, node.msg_log .get_messages_of_type_seq(PbftMessageType::Prepare, 1) .len() ); // Add two valid Commits and verify the node is still Committing (hasn't received enough) assert!(node .on_peer_message( mock_msg(PbftMessageType::Commit, 0, 1, vec![1], vec![1], false), &mut state, ) .is_ok()); assert!(node .on_peer_message( mock_msg(PbftMessageType::Commit, 0, 1, vec![2], vec![1], false), &mut state, ) .is_ok()); assert_eq!(PbftPhase::Committing, state.phase); // Verify Commits' block IDs must match assert!(node .on_peer_message( mock_msg(PbftMessageType::Commit, 0, 1, vec![3], vec![2], false), &mut state, ) .is_ok()); assert_eq!(PbftPhase::Committing, state.phase); // Verify Commits must be for current sequence number assert!(node .on_peer_message( mock_msg(PbftMessageType::Commit, 0, 2, vec![1], vec![2], false), &mut state, ) .is_ok()); assert!(node .on_peer_message( mock_msg(PbftMessageType::Commit, 0, 2, vec![2], vec![2], false), &mut state, ) .is_ok()); assert!(node .on_peer_message( mock_msg(PbftMessageType::Commit, 0, 2, vec![3], vec![2], false), &mut state, ) .is_ok()); assert_eq!(PbftPhase::Committing, state.phase); // Verify that there must be a matching PrePrepare (even after 2f + 1 Commits) assert!(node .on_peer_message( mock_msg(PbftMessageType::Commit, 0, 1, vec![3], vec![1], false), &mut state, ) .is_ok()); assert_eq!(PbftPhase::Committing, state.phase); // Receive the PrePrepare and node's own Commit; verify node is in the Finishing(false) // phase, commit timeout is stopped, and block was committed assert!(node .on_peer_message( mock_msg(PbftMessageType::PrePrepare, 0, 1, vec![0], vec![1], false), &mut state, ) .is_ok()); assert!(node .on_peer_message( mock_msg(PbftMessageType::Commit, 0, 1, vec![0], vec![1], true), &mut state, ) .is_ok()); assert_eq!(PbftPhase::Finishing(false), state.phase); assert!(!state.commit_timeout.is_active()); assert!(service.was_called_with_args(stringify_func_call!("commit_block", vec![1]))); // Verify transition only happens once, block commit doesn't happen again assert!(node .on_peer_message( mock_msg(PbftMessageType::Commit, 0, 1, vec![4], vec![1], false), &mut state, ) .is_ok()); assert!(service.was_called_with_args_once(stringify_func_call!("commit_block", vec![1]))); } /// When a block gets committed through the standard procedure (i.e., not the catch-up /// procedure), an iteration of the PBFT algorithm is considered “completed” and the node is /// ready to start over again for the next sequence number/block. In order to do this, the node /// will have to update its state when a block gets committed and perform some other necessary /// actions: /// /// 1. The sequence number will be incremented by 1 /// 2. The node’s phase will be reset to PrePreparing /// 3. The node’s mode will be set to Normal /// 4. The node's chain head will be updated /// 5. The idle timeout will be started /// 6. The view will be incremented by 1 iff the node is at a forced view change /// 7. 
The primary (and only the primary) will initialize a new block /// /// (1-5) are necessary for the node to be ready to start the next iteration of the algorithm, /// (6) is required to implement the regular view changes RFC, and (7) is a prerequisite for /// the primary to be able to publish a block for the next sequence number. /// /// The validator will send a notification to the PBFT engine when a block gets committed, and /// the PBFT engine will handle this notification with the PbftNode::on_block_commit() method. #[test] fn test_block_commit_update() { // Initialize node 0 with a 4 node config and set the node's phase to Finishing(false) let (mut node, mut state, service) = mock_node(&mock_config(4), vec![0], mock_block(0)); state.phase = PbftPhase::Finishing(false); // Simulate block commit notification for block 1; verify that node properly updates its // state and initializes a new block (it's the primary) assert!(node.on_block_commit(vec![1], &mut state).is_ok()); assert_eq!(2, state.seq_num); assert_eq!(PbftPhase::PrePreparing, state.phase); assert_eq!(PbftMode::Normal, state.mode); assert_eq!(vec![1], state.chain_head); assert_eq!(0, state.view); assert!(state.idle_timeout.is_active()); assert!( service.was_called_with_args(stringify_func_call!("initialize_block", Some(vec![1]))) ); // Turn off idle timeout and reset phase to Finishing(false) state.idle_timeout.stop(); state.phase = PbftPhase::Finishing(false); // Set the node's forced_view_change_interval to 3 and its mode to ViewChanging state.forced_view_change_interval = 3; state.mode = PbftMode::ViewChanging(1); // Simulate block commit notification for block 2; verify that node properly updates its // state and does NOT initialize a new block (it's no longer the primary because of a // forced view change) assert!(node.on_block_commit(vec![2], &mut state).is_ok()); assert_eq!(3, state.seq_num); assert_eq!(PbftPhase::PrePreparing, state.phase); assert_eq!(PbftMode::Normal, state.mode); assert_eq!(vec![2], state.chain_head); assert_eq!(1, state.view); assert!(state.idle_timeout.is_active()); assert!( !service.was_called_with_args(stringify_func_call!("initialize_block", Some(vec![2]))) ); } /// Dynamic membership is an important aspect of any practical distributed system; there must /// be a mechanism for adding and removing nodes in the event of new members joining or an /// existing member malfunctioning. /// /// Membership changes in Sawtooth PBFT are dictated by the on-chain setting /// `sawtooth.consensus.pbft.members`, which contains a list of the network’s members. When /// this on-chain setting is updated in a block and that block gets committed, the PBFT nodes /// must update their local lists of members and value of `f` (the maximum number of faulty /// nodes) to match the changes. /// /// This functionality is tested using a mock consensus `Service` that will produce different /// values for different `block_id`s. Testing will ensure that the list of members in the /// node’s state is updated when the on-chain list changes in any way (either changing which /// nodes are present or changing the ordering), and that the value of `f` is updated /// accordingly (should panic if it is 0). 
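///
/// The update itself amounts to re-reading the setting and re-deriving `f` (a sketch;
/// `read_members_setting` is a hypothetical helper, though the panic message matches the one
/// asserted below):
///
/// ```ignore
/// state.member_ids = read_members_setting(block_id); // hypothetical helper
/// state.f = ((state.member_ids.len() - 1) / 3) as u64;
/// if state.f == 0 {
///     panic!("This network no longer contains enough nodes to be fault tolerant");
/// }
/// // 7 members -> f = 2; 3 members -> f = 0, which panics.
/// ```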
#[test] #[should_panic(expected = "This network no longer contains enough nodes to be fault tolerant")] #[allow(unused_must_use)] fn test_membership_changes() { // Initialize a node with a 6 node config let (mut node, mut state, service) = mock_node(&mock_config(6), vec![0], mock_block(0)); // Update the mock Service's get_settings() method to return a members list with an added // node at block 1, re-ordered at block 2, and a network that is too small at block 3 let mut block_1_settings = HashMap::new(); let block_1_members = vec![ vec![0], vec![1], vec![2], vec![3], vec![4], vec![5], vec![6], ]; block_1_settings.insert( "sawtooth.consensus.pbft.members".to_string(), serde_json::to_string(&block_1_members.iter().map(hex::encode).collect::<Vec<_>>()) .unwrap(), ); service .settings .borrow_mut() .insert(vec![1], block_1_settings); let mut block_2_settings = HashMap::new(); let block_2_members = vec![ vec![1], vec![0], vec![2], vec![3], vec![4], vec![5], vec![6], ]; block_2_settings.insert( "sawtooth.consensus.pbft.members".to_string(), serde_json::to_string(&block_2_members.iter().map(hex::encode).collect::<Vec<_>>()) .unwrap(), ); service .settings .borrow_mut() .insert(vec![2], block_2_settings); let mut block_3_settings = HashMap::new(); block_3_settings.insert( "sawtooth.consensus.pbft.members".to_string(), serde_json::to_string( &vec![vec![0], vec![1], vec![2]] .iter() .map(hex::encode) .collect::<Vec<_>>(), ) .unwrap(), ); service .settings .borrow_mut() .insert(vec![3], block_3_settings); // Simulate block commit for block 1; verify node's members list is updated properly and f is // now 2 assert!(node.on_block_commit(vec![1], &mut state).is_ok()); assert_eq!(block_1_members, state.member_ids); assert_eq!(2, state.f); // Simulate block commit for block 2; verify node's members list is updated properly and f is // still 2 assert!(node.on_block_commit(vec![2], &mut state).is_ok()); assert_eq!(block_2_members, state.member_ids); assert_eq!(2, state.f); // Simulate block commit for block 3; verify that it panics (not enough members) node.on_block_commit(vec![3], &mut state); } /// To keep memory usage under control, the PBFT log must be garbage-collected periodically. /// Every time a block gets committed (the node moves on to the next sequence number), the node /// will check if the number of messages in its logs exceeds a certain size; if it does, it /// will clean up old messages and blocks. /// /// The node must always retain the committed block at the previous sequence number as well as /// the `Commit` messages for the previous sequence number, because it needs these to produce a /// valid consensus seal. Thus, when the log is garbage-collected, all messages and blocks that /// are older than the node’s previous sequence number (< node’s sequence number - 1) are /// removed from the log. 
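///
/// The retention rule can be pictured like this (a sketch; the real log structures are
/// internal to the message log):
///
/// ```ignore
/// if log_size > max_log_size {
///     // Keep the previous sequence number and newer; drop everything older
///     messages.retain(|msg| msg.info().get_seq_num() >= state.seq_num - 1);
///     blocks.retain(|block| block.block_num >= state.seq_num - 1);
/// }
/// ```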
#[test] fn test_garbage_collection() { // Initialize a new node and set the max_log_size field of the node’s log to 2 let (mut node, mut state, _) = mock_node(&mock_config(4), vec![0], mock_block(0)); node.msg_log.set_max_log_size(2); // Add Block and PrePrepare for sequence numbers 1 and 2 node.msg_log.add_validated_block(mock_block(1)); node.msg_log.add_validated_block(mock_block(2)); node.msg_log.add_message(mock_msg( PbftMessageType::PrePrepare, 0, 1, vec![0], vec![1], false, )); node.msg_log.add_message(mock_msg( PbftMessageType::PrePrepare, 0, 2, vec![0], vec![2], false, )); // Simulate commit of block 1; verify node is now at seq_num 2 and all messages are still // in the log since they all have seq_num >= state.seq_num - 1 state.phase = PbftPhase::Finishing(false); assert!(node.on_block_commit(vec![1], &mut state).is_ok()); assert_eq!(2, state.seq_num); assert!(node.msg_log.get_block_with_id(&vec![1]).is_some()); assert!(node.msg_log.get_block_with_id(&vec![2]).is_some()); assert!(node.msg_log.has_pre_prepare(1, 0, &vec![1])); assert!(node.msg_log.has_pre_prepare(2, 0, &vec![2])); // Simulate commit of block 2; verify node is now at seq_num 3 and messages for seq_num 2 // are no longer in the log state.phase = PbftPhase::Finishing(false); assert!(node.on_block_commit(vec![2], &mut state).is_ok()); assert_eq!(3, state.seq_num); assert!(node.msg_log.get_block_with_id(&vec![1]).is_none()); assert!(node.msg_log.get_block_with_id(&vec![2]).is_some()); assert!(!node.msg_log.has_pre_prepare(1, 0, &vec![1])); assert!(node.msg_log.has_pre_prepare(2, 0, &vec![2])); } /// To guarantee liveness in the presence of potentially faulty nodes, PBFT provides the view /// changing procedure to move to a new view and institute a new primary. When starting the /// view change procedure, a node will need to perform the following actions: /// /// 1. Update its mode to ViewChanging(v), where `v` is the view number that it is attempting /// to change to /// 2. Stop both the idle and commit timeouts, since these are not needed during the view /// change procedure /// 3. Stop the view change timeout if it is already started /// 4. Broadcast a `ViewChange` message for the new view /// /// These actions should only be performed once for a particular view change, however; a view /// change can be initiated based on multiple conditions, and it’s possible for several of /// these situations to be encountered. Therefore, the node must guard itself from broadcasting /// a view change message twice for the same view. /// /// Initiating a view change is handled by the `PbftNode::start_view_change` method. 
This test /// will ensure that the method performs all of the actions listed above and guards itself from /// duplicate broadcasting of `ViewChange` messages #[test] #[allow(unused_must_use)] fn test_view_change_starting() { // Initialize a new node; start its idle, commit, and view change timeouts let (mut node, mut state, service) = mock_node(&mock_config(4), vec![0], mock_block(0)); state.idle_timeout.start(); state.commit_timeout.start(); state.view_change_timeout.start(); // Start a view change for view 1 and verify that the state is updated appropriately assert!(node.start_view_change(&mut state, 1).is_ok()); assert_eq!(PbftMode::ViewChanging(1), state.mode); assert!(!state.idle_timeout.is_active()); assert!(!state.commit_timeout.is_active()); assert!(!state.view_change_timeout.is_active()); assert!(service.was_called_with_args(stringify_func_call!( "broadcast", "ViewChange", mock_msg(PbftMessageType::ViewChange, 1, 0, vec![0], vec![], false).message_bytes ))); // Verify ViewChange message can't be broadcasted again for the same view node.start_view_change(&mut state, 1); assert!(service.was_called_with_args_once(stringify_func_call!( "broadcast", "ViewChange", mock_msg(PbftMessageType::ViewChange, 1, 0, vec![0], vec![], false).message_bytes ))); // Start another view change for view 2 and verify that the state is updated appropriately state.idle_timeout.start(); state.commit_timeout.start(); state.view_change_timeout.start(); assert!(node.start_view_change(&mut state, 2).is_ok()); assert_eq!(PbftMode::ViewChanging(2), state.mode); assert!(!state.idle_timeout.is_active()); assert!(!state.commit_timeout.is_active()); assert!(!state.view_change_timeout.is_active()); assert!(service.was_called_with_args(stringify_func_call!( "broadcast", "ViewChange", mock_msg(PbftMessageType::ViewChange, 2, 0, vec![0], vec![], false).message_bytes ))); } /// When a node is view changing, it should not accept any messages that are not `ViewChange`s /// or `NewView`s. This allows the node to prioritize the view changing procedure and not be /// affected by messages not related to view changes. #[test] #[allow(unused_must_use)] fn test_message_ignoring_while_view_changing() { // Initialize a new node and set its mode to ViewChanging(1) let (mut node, mut state, _) = mock_node(&mock_config(4), vec![0], mock_block(0)); state.mode = PbftMode::ViewChanging(1); // Receive PrePrepare, Prepare, and Commit messages; verify that they are all ignored node.on_peer_message( mock_msg(PbftMessageType::PrePrepare, 0, 1, vec![1], vec![1], false), &mut state, ); node.on_peer_message( mock_msg(PbftMessageType::Prepare, 0, 1, vec![1], vec![1], false), &mut state, ); node.on_peer_message( mock_msg(PbftMessageType::Commit, 0, 1, vec![1], vec![1], false), &mut state, ); assert_eq!( 0, node.msg_log .get_messages_of_type_seq(PbftMessageType::PrePrepare, 1) .len() ); assert_eq!( 0, node.msg_log .get_messages_of_type_seq(PbftMessageType::Prepare, 1) .len() ); assert_eq!( 0, node.msg_log .get_messages_of_type_seq(PbftMessageType::Commit, 1) .len() ); } /// A view change should be started by a node if any of the following occur: /// /// 1. The idle timeout expires /// 2. The commit timeout expires /// 3. The view change timeout expires /// 4. A PrePrepare is received from the current primary, but the node already has a PrePrepare /// from the primary at the same view and sequence number but for a different block /// 5. A Prepare is received from the current primary /// 6. 
The node receives f + 1 matching ViewChange messages for a future view /// /// (1) makes sure that a primary does not stall the network indefinitely by never producing a /// block or PrePrepare (see https://github.com/hyperledger/sawtooth-rfcs/pull/29 for more /// information). In this situation, the targeted view change will be `v + 1`, where `v` is the /// node’s current view. /// /// (2) makes sure that the network does not get stuck forever if something goes wrong; if the /// network does get stuck, the timer will eventually time out, the view will change, and the /// network’s progress will resume. In this situation, the targeted view change will be /// `v + 1`, where `v` is the node’s current view. /// /// (3) makes sure that the new primary will send a NewView message to complete the view change /// within a reasonable amount of time; if it does not, the network will try another view /// change. In this situation, the targeted view change will be `v' + 1`, where `v'` is the /// view the node was already attempting to change to. /// /// (4) makes sure that a primary does not endorse more than one block with `PrePrepare` /// messages, so the network can agree on a single block to vote on. In this situation, the /// targeted view change will be `v + 1`, where `v` is the node’s current view. /// /// (5) makes sure that the primary does not get a `Prepare` vote, since its PrePrepare counts /// as its “vote” for the first round of consensus. /// /// (6) makes sure that a node does not start view changing too late; when `f + 1` matching /// `ViewChange` messages have been received, the node can be sure that at least one non-faulty /// node has started a view change, so it can start view changing as well. In this situation, /// the targeted view change will be `v`, where `v` is the view specified in the `f + 1` /// `ViewChange` messages. /// /// All of these situations should be tested to ensure that they are triggered when (and only /// when) expected, and that the targeted view is correct. /// /// NOTE: View changes for events (1-3) are not tested here, because they are implemented in /// the main engine loop which is difficult to test. #[test] #[allow(unused_must_use)] fn test_view_change_initiation_conditions() { // Initialize a new node let (mut node, mut state, _) = mock_node(&mock_config(4), vec![1], mock_block(0)); // Verify receiving two PrePrepares for the same view and sequence number but with // different blocks triggers a view change node.on_peer_message( mock_msg(PbftMessageType::PrePrepare, 0, 1, vec![0], vec![1], false), &mut state, ); node.on_peer_message( mock_msg(PbftMessageType::PrePrepare, 0, 1, vec![0], vec![2], false), &mut state, ); assert_eq!(PbftMode::ViewChanging(1), state.mode); // Verify receiving a Prepare from the current primary triggers a view change state.mode = PbftMode::Normal; node.on_peer_message( mock_msg(PbftMessageType::Prepare, 0, 1, vec![0], vec![1], false), &mut state, ); assert_eq!(PbftMode::ViewChanging(1), state.mode); // Verify receiving f + 1 ViewChanges starts the view change early state.mode = PbftMode::Normal; node.on_peer_message( mock_msg(PbftMessageType::ViewChange, 2, 0, vec![2], vec![], false), &mut state, ); node.on_peer_message( mock_msg(PbftMessageType::ViewChange, 2, 0, vec![3], vec![], false), &mut state, ); assert_eq!(PbftMode::ViewChanging(2), state.mode); } /// To perform a view change, the network votes on the view change by broadcasting `ViewChange` /// messages. 
Nodes will accept these `ViewChange` messages and add them to their logs if they
    /// are valid. To be valid, a `ViewChange` message must follow these rules:
    ///
    /// 1. If the node is already in the midst of a view change for view `v` (it is in mode
    ///    ViewChanging(v)), the `ViewChange` must be for a view >= v.
    /// 2. If the node is not already view changing (it is in Normal mode), the `ViewChange` must
    ///    be for a view greater than the node’s current view.
    ///
    /// These conditions ensure that no old (stale) view change messages are added to the log.
    ///
    /// When a node has `2f + 1` `ViewChange` messages for a view, it will start its view change
    /// timeout to ensure that the new primary produces a `NewView` in a reasonable amount of time.
    /// The appropriate duration of the view change timeout is calculated based on a base duration
    /// (defined by the state object’s `view_change_duration` field) using the formula: `(desired
    /// view number - node’s current view number) * view_change_duration`.
    ///
    /// When the new primary for the view specified in the `ViewChange` message has `2f + 1`
    /// `ViewChange` messages for that view, it will broadcast a `NewView` message to the network.
    /// Only the new primary should broadcast the `NewView` message.
    ///
    /// This test ensures that only non-stale `ViewChange` messages are accepted, nodes start their
    /// view change timeouts with the appropriate duration when `2f + 1` `ViewChange` messages are
    /// received, and that the new primary (and only the new primary) broadcasts a `NewView` when
    /// it has the required messages in its log.
    #[test]
    #[allow(unused_must_use)]
    fn test_view_change_acceptance() {
        // Initialize node 0
        let (mut node, mut state, service) = mock_node(&mock_config(4), vec![0], mock_block(0));

        // Verify that a ViewChange message for the node's current view is ignored
        node.on_peer_message(
            mock_msg(PbftMessageType::ViewChange, 0, 0, vec![1], vec![], false),
            &mut state,
        );
        assert_eq!(
            0,
            node.msg_log
                .get_messages_of_type_view(PbftMessageType::ViewChange, 0)
                .len()
        );

        // Verify that a ViewChange for a future view is accepted and added to the log
        let vc1 = mock_msg(PbftMessageType::ViewChange, 1, 0, vec![1], vec![], false);
        assert!(node.on_peer_message(vc1.clone(), &mut state).is_ok());
        assert_eq!(
            &&vc1,
            node.msg_log
                .get_messages_of_type_view(PbftMessageType::ViewChange, 1)
                .get(0)
                .expect("ViewChange should be in log")
        );

        // Verify that a ViewChange message is ignored if the node is already in the process of a
        // view change to a later view
        state.mode = PbftMode::ViewChanging(3);
        node.on_peer_message(
            mock_msg(PbftMessageType::ViewChange, 2, 0, vec![1], vec![], false),
            &mut state,
        );
        assert_eq!(
            0,
            node.msg_log
                .get_messages_of_type_view(PbftMessageType::ViewChange, 2)
                .len()
        );

        // Verify NewView is not broadcasted by new primary when there aren't 2f + 1 ViewChanges
        state.mode = PbftMode::ViewChanging(4);
        assert!(node
            .on_peer_message(
                mock_msg(PbftMessageType::ViewChange, 4, 0, vec![0], vec![], true),
                &mut state
            )
            .is_ok());
        assert!(node
            .on_peer_message(
                mock_msg(PbftMessageType::ViewChange, 4, 0, vec![1], vec![], false),
                &mut state
            )
            .is_ok());
        assert!(!service.was_called_with_args(stringify_func_call!("broadcast", "NewView")));

        // Verify NewView is broadcasted by new primary when there are 2f + 1 ViewChanges
        node.on_peer_message(
            mock_msg(PbftMessageType::ViewChange, 4, 0, vec![2], vec![], false),
            &mut state,
        );
        assert!(service.was_called_with_args(stringify_func_call!("broadcast", "NewView")));

        // Verify NewView is not broadcasted 
when node is not the new primary state.view_change_timeout.stop(); state.view = 4; state.mode = PbftMode::ViewChanging(5); assert!(node .on_peer_message( mock_msg(PbftMessageType::ViewChange, 5, 0, vec![0], vec![], true), &mut state ) .is_ok()); assert!(node .on_peer_message( mock_msg(PbftMessageType::ViewChange, 5, 0, vec![1], vec![], false), &mut state ) .is_ok()); node.on_peer_message( mock_msg(PbftMessageType::ViewChange, 5, 0, vec![3], vec![], false), &mut state, ); // Verify broadcast only happened once (for view 4, not this view) assert!(service.was_called_with_args_once(stringify_func_call!("broadcast", "NewView"))); // Verify view change timeout is started assert!(state.view_change_timeout.is_active()); assert_eq!( state.view_change_duration, state.view_change_timeout.duration() ); // Verify view change timeout uses the appropriate duration, and that it is not started // until 2f + 1 ViewChanges are received state.view_change_timeout.stop(); state.mode = PbftMode::ViewChanging(6); assert!(node .on_peer_message( mock_msg(PbftMessageType::ViewChange, 6, 0, vec![0], vec![], true), &mut state ) .is_ok()); assert!(node .on_peer_message( mock_msg(PbftMessageType::ViewChange, 6, 0, vec![1], vec![], false), &mut state ) .is_ok()); assert!(!state.view_change_timeout.is_active()); assert!(node .on_peer_message( mock_msg(PbftMessageType::ViewChange, 6, 0, vec![2], vec![], false), &mut state, ) .is_ok()); assert!(state.view_change_timeout.is_active()); assert_eq!( state .view_change_duration .checked_mul(2) .expect("Couldn't double view change duration"), state.view_change_timeout.duration() ); } /// When the node that will become primary as the result of a view change has accepted `2f + 1` /// matching `ViewChange` messages for the new view, it will construct a `NewView` message that /// contains the required `ViewChange` messages and broadcast it to the network. When a node /// receives this `NewView` message, it will check that the message is valid (as determined by /// the `PbftNode::verify_new_view` method). If the `NewView` message is not valid, it will be /// ignored; if it is valid, the node will perform the following actions: /// /// 1. Update its view to that of the `NewView` message /// 2. Stop the view change timeout, since it is no longer needed /// 3. Set its phase to PrePreparing, unless it is in the Finishing phase /// 4. Set its mode to Normal /// 5. Start the idle timeout /// /// In addition, the node that was previously the primary will cancel any block it may have /// initialized, and the new primary node (and only the new primary) will initialize a new /// block for the current sequence number. /// /// Furthermore, `NewView` messages can be for any future view, not just the view after the one /// the node is on; they must also be acceptable even if the node is not in the ViewChanging /// mode (perhaps because the node missed all of the `ViewChange` messages). /// /// These actions are necessary to complete the view changing procedure and resume normal /// operation of the PBFT network with the new primary. 
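    ///
    /// As a rough sketch only (hypothetical helper; the real handling lives in
    /// `PbftNode::on_peer_message` and may differ in its details), the state updates for a
    /// valid `NewView` amount to:
    ///
    /// ```ignore
    /// fn apply_new_view(state: &mut PbftState, new_view: u64) {
    ///     state.view = new_view;                     // 1. adopt the view from the NewView
    ///     state.view_change_timeout.stop();          // 2. the view change is complete
    ///     if !matches!(state.phase, PbftPhase::Finishing(_)) {
    ///         state.phase = PbftPhase::PrePreparing; // 3. restart consensus for this seq_num
    ///     }
    ///     state.mode = PbftMode::Normal;             // 4. leave view changing mode
    ///     state.idle_timeout.start();                // 5. watch the new primary for progress
    /// }
    /// ```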
#[test]
    #[allow(unused_must_use)]
    fn test_new_view_acceptance() {
        // Create signing keys for a new network and instantiate node 1; set its mode to
        // ViewChanging(1) and start the view change timeout
        let key_pairs = mock_signer_network(4);
        let (mut node, mut state, service) = mock_node(
            &mock_config_from_signer_network(&key_pairs),
            key_pairs[1].pub_key.clone(),
            mock_block(0),
        );
        state.mode = PbftMode::ViewChanging(1);
        state.view_change_timeout.start();

        // Verify that a NewView from a node that isn't the new primary is rejected
        let mut nv1 = PbftNewView::new();
        nv1.set_info(PbftMessageInfo::new_from(
            PbftMessageType::NewView,
            1,
            0,
            key_pairs[0].pub_key.clone(),
        ));
        nv1.set_view_changes(RepeatedField::from(vec![
            mock_vote(PbftMessageType::ViewChange, 1, 0, vec![], &key_pairs[1]),
            mock_vote(PbftMessageType::ViewChange, 1, 0, vec![], &key_pairs[2]),
        ]));
        node.on_peer_message(
            ParsedMessage::from_new_view_message(nv1).expect("Failed to parse nv1"),
            &mut state,
        );
        assert_eq!(PbftMode::ViewChanging(1), state.mode);

        // Verify that a valid NewView from the new primary is accepted and the node updates its
        // state appropriately (node 1 is the new primary, so it should initialize a block)
        let mut nv2 = PbftNewView::new();
        nv2.set_info(PbftMessageInfo::new_from(
            PbftMessageType::NewView,
            1,
            0,
            key_pairs[1].pub_key.clone(),
        ));
        nv2.set_view_changes(RepeatedField::from(vec![
            mock_vote(PbftMessageType::ViewChange, 1, 0, vec![], &key_pairs[0]),
            mock_vote(PbftMessageType::ViewChange, 1, 0, vec![], &key_pairs[2]),
        ]));
        node.on_peer_message(
            ParsedMessage::from_new_view_message(nv2).expect("Failed to parse nv2"),
            &mut state,
        );
        assert_eq!(1, state.view);
        assert_eq!(PbftPhase::PrePreparing, state.phase);
        assert_eq!(PbftMode::Normal, state.mode);
        assert!(!state.view_change_timeout.is_active());
        assert!(state.idle_timeout.is_active());
        assert!(service.was_called("initialize_block"));

        // Verify that a valid NewView for any future view is accepted and node updates its state
        // appropriately (node 1 is the old primary, so it will cancel any initialized block and it
        // won't initialize a new block again; phase should remain Finishing)
        state.phase = PbftPhase::Finishing(false);
        state.idle_timeout.stop();
        state.view_change_timeout.start();
        let mut nv3 = PbftNewView::new();
        nv3.set_info(PbftMessageInfo::new_from(
            PbftMessageType::NewView,
            3,
            0,
            key_pairs[3].pub_key.clone(),
        ));
        nv3.set_view_changes(RepeatedField::from(vec![
            mock_vote(PbftMessageType::ViewChange, 3, 0, vec![], &key_pairs[0]),
            mock_vote(PbftMessageType::ViewChange, 3, 0, vec![], &key_pairs[1]),
        ]));
        node.on_peer_message(
            ParsedMessage::from_new_view_message(nv3).expect("Failed to parse nv3"),
            &mut state,
        );
        assert_eq!(3, state.view);
        assert_eq!(PbftPhase::Finishing(false), state.phase);
        assert_eq!(PbftMode::Normal, state.mode);
        assert!(!state.view_change_timeout.is_active());
        assert!(state.idle_timeout.is_active());
        assert!(service.was_called_with_args(stringify_func_call!("cancel_block")));
        assert!(service.was_called_with_args_once(stringify_func_call!("initialize_block")));
    }

    /// If a node falls behind, or if a new node is added to an existing network, the node will
    /// need to “catch up” to the rest of the network by committing all of the blocks to get to
    /// that point. The catch-up procedure exists for this purpose.
    ///
    /// To commit a block n using the catch-up procedure, the node must have a valid consensus seal
    /// for block n. With this consensus seal, the node can use the `PbftNode::catchup` method to
    /// perform the following actions:
    ///
    /// 1. 
Extract all of the votes/messages from the consensus seal and add them to its log, along /// with the signed bytes (header bytes and header signature) /// 2. Update the node’s view if the messages in the seal are from a different view /// 3. Tell the validator to commit the block that’s verified by the consensus seal /// 4. Stop the idle timeout /// 5. Update its phase to Finishing /// /// (1) allows the node to build the seal for the commit block in the future if necessary. (2) /// allows the node to keep up with view changes as it catches up. (3) is done to actually /// commit the block. (4) is done because the primary that produced this block was not faulty. /// (5) prepares the node to receive the commit notification from the validator. /// /// The `catchup` method assumes that the seal has already been verified. /// /// This test will verify that the `PbftNode::catchup` method performs the above actions when /// it is provided a valid consensus seal. #[test] fn test_catch_up_commit() { // Create signing keys for a new network and instantiate node 1 let key_pairs = mock_signer_network(4); let (mut node, mut state, service) = mock_node( &mock_config_from_signer_network(&key_pairs), key_pairs[1].pub_key.clone(), mock_block(0), ); // Start the node's idle timeout and verify it is active state.idle_timeout.start(); assert!(state.idle_timeout.is_active()); // Construct a valid consensus seal for block 1 with votes from view 1 and catch up with it let votes = (2..4) .map(|i| mock_vote(PbftMessageType::Commit, 1, 1, vec![1], &key_pairs[i])) .collect::<Vec<_>>(); let seal = mock_seal(1, 1, vec![1], &key_pairs[0], votes.clone()); assert!(node.catchup(&mut state, &seal, true).is_ok()); // Verify catch up was done correctly let node_2_vote = node .msg_log .get_messages_of_type_seq(PbftMessageType::Commit, 1) .iter() .find(|msg| msg.info().get_signer_id() == key_pairs[2].pub_key.as_slice()) .cloned() .expect("Node2's vote is not in log"); assert_eq!(votes[0].message_bytes, node_2_vote.message_bytes); assert_eq!(votes[0].header_bytes, node_2_vote.header_bytes); assert_eq!(votes[0].header_signature, node_2_vote.header_signature); let node_3_vote = node .msg_log .get_messages_of_type_seq(PbftMessageType::Commit, 1) .iter() .find(|msg| msg.info().get_signer_id() == key_pairs[3].pub_key.as_slice()) .cloned() .expect("Node3's vote is not in log"); assert_eq!(votes[1].message_bytes, node_3_vote.message_bytes); assert_eq!(votes[1].header_bytes, node_3_vote.header_bytes); assert_eq!(votes[1].header_signature, node_3_vote.header_signature); assert_eq!(1, state.view); assert_eq!(PbftPhase::Finishing(true), state.phase); assert!(!state.idle_timeout.is_active()); assert!(service.was_called_with_args(stringify_func_call!("commit_block", vec![1]))); } /// One of the ways that the catch-up procedure is triggered is when the node is on /// block/sequence number `n` and it receives a block `n + 1` that has a valid seal for block /// `n`. In this situation, the node can use the consensus to go ahead and commit block `n`. /// /// However, there is one caveat: the node may already have instructed the validator to commit /// block `n`, and is just waiting for the confirmation from the validator that the block was /// committed. To handle this, the node must check that it is not waiting for the commit /// confirmation (it will be in the `Finishing` phase if it is waiting). 
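    ///
    /// A minimal sketch of this check (hypothetical helper names; the seal is assumed to have
    /// been verified already):
    ///
    /// ```ignore
    /// // Block `n + 1` arrived while the node is still working on block `n`
    /// if block.block_num == state.seq_num + 1 && !matches!(state.phase, PbftPhase::Finishing(_)) {
    ///     let seal = parse_seal_from_payload(&block.payload)?; // seal proves block `n`
    ///     node.catchup(&mut state, &seal, true)?;              // commit block `n` early
    /// }
    /// ```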
#[test] fn test_catch_up_on_new_block() { // Create signing keys for a new network and instantiate node 1 let key_pairs = mock_signer_network(4); let (mut node, mut state, service) = mock_node( &mock_config_from_signer_network(&key_pairs), key_pairs[1].pub_key.clone(), mock_block(0), ); // Receive block 1 (BlockNew and BlockValid) and verify that node is still PrePreparing // (should not perform catch-up for current block) assert!(node.on_block_new(mock_block(1), &mut state).is_ok()); assert!(node.on_block_valid(vec![1], &mut state).is_ok()); assert_eq!(PbftPhase::PrePreparing, state.phase); // Receive block 2 (BlockNew and BlockValid) and verify that catch up was performed for // block 1 (phase is Finishing(true) and Service.commit_block(block1.block_id) was called) let mut block2 = mock_block(2); block2.payload = mock_seal( 0, 1, vec![1], &key_pairs[0], (2..4) .map(|i| mock_vote(PbftMessageType::Commit, 0, 1, vec![1], &key_pairs[i])) .collect::<Vec<_>>(), ) .write_to_bytes() .expect("Failed to write seal to bytes"); assert!(node.on_block_new(block2.clone(), &mut state).is_ok()); assert!(node.on_block_valid(vec![2], &mut state).is_ok()); assert_eq!(PbftPhase::Finishing(true), state.phase); assert!(service.was_called_with_args(stringify_func_call!("commit_block", vec![1]))); // Receive block 2 again and verify that Service.commit_block was not called again (block // was already committed) assert!(node.on_block_new(block2, &mut state).is_ok()); assert!(node.on_block_valid(vec![2], &mut state).is_ok()); assert!(service.was_called_with_args_once(stringify_func_call!("commit_block"))); } /// When a node that is on block/seq_num `n` receives a block `m` (where `m > n + 1`), it will /// not be able to commit block `m - 1` using catch-up right away; instead, it will have to /// wait until block `m - 2` is committed before committing block `m - 1`. To commit block /// `m - 1` using the catch-up procedure in this scenario: when block `m - 2` is committed, the /// node will check if it has a block `m` in its log; if it does, it can perform catch-up to /// commit block `m - 1` using the seal in block `m`. 
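    ///
    /// Roughly (hypothetical helper names):
    ///
    /// ```ignore
    /// // Runs when block `m - 2` commits and the node advances to seq_num `m - 1`
    /// if let Some(block_m) = node.msg_log.get_block_with_num(state.seq_num + 1) {
    ///     let seal = parse_seal_from_payload(&block_m.payload)?; // proves block `m - 1`
    ///     node.catchup(&mut state, &seal, true)?;
    /// }
    /// ```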
#[test]
    fn test_catch_up_on_block_commit() {
        // Create signing keys for a new network and instantiate node 1; set node's phase to
        // Finishing(true) and make sure its sequence number is 1
        let key_pairs = mock_signer_network(4);
        let (mut node, mut state, service) = mock_node(
            &mock_config_from_signer_network(&key_pairs),
            key_pairs[1].pub_key.clone(),
            mock_block(0),
        );
        state.phase = PbftPhase::Finishing(true);
        assert_eq!(1, state.seq_num);

        // Add blocks 2 and 3 to the node's log
        let mut block2 = mock_block(2);
        block2.payload = mock_seal(
            0,
            1,
            vec![1],
            &key_pairs[0],
            (2..4)
                .map(|i| mock_vote(PbftMessageType::Commit, 0, 1, vec![1], &key_pairs[i]))
                .collect::<Vec<_>>(),
        )
        .write_to_bytes()
        .expect("Failed to write seal to bytes");
        node.msg_log.add_validated_block(block2);
        let mut block3 = mock_block(3);
        block3.payload = mock_seal(
            0,
            2,
            vec![2],
            &key_pairs[0],
            (2..4)
                .map(|i| mock_vote(PbftMessageType::Commit, 0, 2, vec![2], &key_pairs[i]))
                .collect::<Vec<_>>(),
        )
        .write_to_bytes()
        .expect("Failed to write seal to bytes");
        node.msg_log.add_validated_block(block3);

        // Simulate commit of block 1; verify that node is in the Finishing(true) phase at sequence
        // number 2 and commit_block was called with block 2's ID (committed block 2 using seal in
        // block 3 after committing block 1)
        assert!(node.on_block_commit(vec![1], &mut state).is_ok());
        assert_eq!(2, state.seq_num);
        assert_eq!(PbftPhase::Finishing(true), state.phase);
        assert!(service.was_called_with_args(stringify_func_call!("commit_block", vec![2])));
    }

    /// Because the consensus seal for a block `n` is stored in a block `n + 1`, when a node
    /// catches up to the rest of the network, it will not be able to commit the final block
    /// because there is no next block with a consensus seal to use. In this scenario, the node
    /// that is catching up will broadcast a request to the whole network for the final block’s
    /// seal. This request will happen when the node committed block `n` using catch-up (as
    /// indicated by the bool stored in the `Finishing` value of the node’s phase), but it does not
    /// have a block `n + 2` to commit block `n + 1`; it will not happen if the node did not commit
    /// block `n` using catch-up.
    #[test]
    fn test_final_block_seal_request() {
        // Initialize a node and set its phase to Finishing(true) to simulate having committed
        // block 1 using the catch-up procedure
        let (mut node, mut state, service) = mock_node(&mock_config(4), vec![0], mock_block(0));
        state.phase = PbftPhase::Finishing(true);

        // Receive BlockCommit notification for block 1 and verify that the node broadcasted a
        // SealRequest message for sequence number 2
        assert!(node.on_block_commit(vec![1], &mut state).is_ok());
        assert!(service.was_called_with_args(stringify_func_call!(
            "broadcast",
            "SealRequest",
            mock_msg(PbftMessageType::SealRequest, 0, 2, vec![0], vec![], false).message_bytes
        )));
    }

    /// When a node requests a consensus seal for a block `n` by broadcasting a `SealRequest`
    /// message, the other nodes in the network will need to receive this message and, if they have
    /// committed block `n` and are now on sequence number `n + 1`, reply to that node with a valid
    /// seal for block `n`.
    ///
    /// If a node is on sequence number `n + 1` when it receives a `SealRequest` for block `n`, it
    /// can build the seal and send it right away. 
However, if the node is currently on sequence
    /// number `n` (it has not committed block `n` yet), it will not be able to build the seal
    /// right away; in this case, it will add the request to its message log, wait until the block
    /// is committed (node will now be on `seq_num` `n + 1`), then check the log for a
    /// `SealRequest` for block `n` and if there is one, it will build the seal and send it. If the
    /// receiving node is on any sequence number other than `n` or `n + 1`, it should simply ignore
    /// the request (if it’s behind, it also needs to catch up; if it’s too far ahead, it won’t be
    /// able to build the seal).
    #[test]
    #[allow(unused_must_use)]
    fn test_seal_request_handling() {
        // Initialize a node and set its sequence number to 2
        let (mut node, mut state, service) = mock_node(&mock_config(4), vec![0], mock_block(0));
        state.seq_num = 2;

        // Add messages needed to build seal for block 1
        node.msg_log.add_message(mock_msg(
            PbftMessageType::Commit,
            0,
            1,
            vec![0],
            vec![1],
            true,
        ));
        node.msg_log.add_message(mock_msg(
            PbftMessageType::Commit,
            0,
            1,
            vec![1],
            vec![1],
            false,
        ));
        node.msg_log.add_message(mock_msg(
            PbftMessageType::Commit,
            0,
            1,
            vec![2],
            vec![1],
            false,
        ));

        // Receive a SealRequest for sequence number 1 and verify that a seal is sent to the node
        // that requested it
        assert!(node
            .on_peer_message(
                mock_msg(PbftMessageType::SealRequest, 0, 1, vec![3], vec![], false),
                &mut state
            )
            .is_ok());
        assert!(service.was_called_with_args(stringify_func_call!(
            "send_to",
            &vec![3],
            "Seal",
            node.build_seal(&mut state)
                .expect("Failed to build seal")
                .write_to_bytes()
                .expect("Failed to write seal to bytes")
        )));

        // Add messages needed to build seal for block 2
        node.msg_log.add_message(mock_msg(
            PbftMessageType::Commit,
            0,
            2,
            vec![0],
            vec![2],
            true,
        ));
        node.msg_log.add_message(mock_msg(
            PbftMessageType::Commit,
            0,
            2,
            vec![1],
            vec![2],
            false,
        ));
        node.msg_log.add_message(mock_msg(
            PbftMessageType::Commit,
            0,
            2,
            vec![2],
            vec![2],
            false,
        ));

        // Verify SealRequests for old or future sequence numbers are ignored
        node.on_peer_message(
            mock_msg(PbftMessageType::SealRequest, 0, 0, vec![3], vec![], false),
            &mut state,
        );
        node.on_peer_message(
            mock_msg(PbftMessageType::SealRequest, 0, 3, vec![3], vec![], false),
            &mut state,
        );
        assert_eq!(
            0,
            node.msg_log
                .get_messages_of_type_view(PbftMessageType::SealRequest, 0)
                .len()
        );
        assert!(service.was_called_with_args_once(stringify_func_call!(
            "send_to",
            &vec![3],
            "Seal"
        )));

        // Verify SealRequest for node's current sequence number gets added to the log
        let seq_num_2_req = mock_msg(PbftMessageType::SealRequest, 0, 2, vec![3], vec![], false);
        node.on_peer_message(seq_num_2_req.clone(), &mut state);
        assert_eq!(
            &&seq_num_2_req,
            node.msg_log
                .get_messages_of_type_seq(PbftMessageType::SealRequest, 2)
                .first()
                .expect("SealRequest not in log")
        );

        // Simulate committing block 2 and verify that the node sends a seal for block 2 to the
        // node that requested it
        assert!(node.on_block_commit(vec![2], &mut state).is_ok());
        assert_eq!(3, state.seq_num);
        assert!(service.was_called_with_args(stringify_func_call!(
            "send_to",
            &vec![3],
            "Seal",
            node.build_seal(&mut state)
                .expect("Failed to build seal")
                .write_to_bytes()
                .expect("Failed to write seal to bytes")
        )));
    }

    /// When a node that is catching up has requested the consensus seal for the final block and
    /// another node has replied with the seal, the requesting node will need to handle the seal
    /// message. This handling includes validating the message according to the following criteria:
    ///
    /// 1. 
The node has the block that this seal is for in its log /// 2. The block is for the current sequence number /// 3. The consensus seal itself is valid (as determined by the /// `PbftNode::verify_consensus_seal` method) /// /// (1) and (2) ensure that the node can and should actually commit the block the seal is for: /// if the node doesn’t have that block or the block isn’t for the node’s current sequence /// number, it will not be able to commit it at the current sequence number. (3) validates the /// seal itself to make sure it is correct. /// /// In addition to these criteria, the node should only commit the block using the seal once; /// because the node requests the seal from all nodes in the network, it will receive a seal /// from most (if not all) of the other nodes. To prevent trying to commit the same block each /// time, the node must check if it is in the Finishing phase to determine whether or not it /// has already instructed the validator to commit the block. #[test] #[allow(unused_must_use)] fn test_seal_reply_handling() { // Create signing keys for a new network and instantiate node 1 let key_pairs = mock_signer_network(4); let (mut node, mut state, service) = mock_node( &mock_config_from_signer_network(&key_pairs), key_pairs[1].pub_key.clone(), mock_block(0), ); // Receive a seal for block 1 and verify that the node doesn't use it for catch up (node // doesn't have the block yet) let seal1 = mock_seal( 0, 1, vec![1], &key_pairs[0], (2..4) .map(|i| mock_vote(PbftMessageType::Commit, 0, 1, vec![1], &key_pairs[i])) .collect::<Vec<_>>(), ); let seal_msg1 = ParsedMessage { from_self: false, header_bytes: vec![], header_signature: vec![], message: PbftMessageWrapper::Seal(seal1.clone()), message_bytes: seal1 .write_to_bytes() .expect("Failed to write seal1 to bytes"), }; node.on_peer_message(seal_msg1.clone(), &mut state); assert_eq!(PbftPhase::PrePreparing, state.phase); // Add blocks 1 and 2 to the node's log node.msg_log.add_validated_block(mock_block(1)); node.msg_log.add_validated_block(mock_block(2)); // Receive a seal for block 2 and verify that the node doesn't use it for catch up (not for // current sequence number) let seal2 = mock_seal( 0, 2, vec![2], &key_pairs[0], (2..4) .map(|i| mock_vote(PbftMessageType::Commit, 0, 2, vec![2], &key_pairs[i])) .collect::<Vec<_>>(), ); let seal_msg2 = ParsedMessage { from_self: false, header_bytes: vec![], header_signature: vec![], message: PbftMessageWrapper::Seal(seal2.clone()), message_bytes: seal2 .write_to_bytes() .expect("Failed to write seal2 to bytes"), }; node.on_peer_message(seal_msg2, &mut state); assert_eq!(PbftPhase::PrePreparing, state.phase); // Verify that an invalid seal (e.g. 
vote from seal signer) is rejected let invalid_seal1 = mock_seal( 0, 1, vec![1], &key_pairs[0], vec![ mock_vote(PbftMessageType::Commit, 0, 1, vec![1], &key_pairs[0]), mock_vote(PbftMessageType::Commit, 0, 1, vec![1], &key_pairs[2]), ], ); let invalid_seal_msg1 = ParsedMessage { from_self: false, header_bytes: vec![], header_signature: vec![], message: PbftMessageWrapper::Seal(invalid_seal1.clone()), message_bytes: invalid_seal1 .write_to_bytes() .expect("Failed to write seal1 to bytes"), }; node.on_peer_message(invalid_seal_msg1, &mut state); assert_eq!(PbftPhase::PrePreparing, state.phase); // Verify that a valid seal for block one is accepted and used to perform a catch up commit // of block 1 assert!(node.on_peer_message(seal_msg1, &mut state).is_ok()); assert_eq!(PbftPhase::Finishing(false), state.phase); assert!(service.was_called_with_args(stringify_func_call!("commit_block", vec![1]))); // Verify that a duplicate seal won't result in another commit_block call let extra_seal1 = mock_seal( 0, 1, vec![1], &key_pairs[2], vec![ mock_vote(PbftMessageType::Commit, 0, 1, vec![1], &key_pairs[0]), mock_vote(PbftMessageType::Commit, 0, 1, vec![1], &key_pairs[3]), ], ); let extra_seal_msg1 = ParsedMessage { from_self: false, header_bytes: vec![], header_signature: vec![], message: PbftMessageWrapper::Seal(extra_seal1.clone()), message_bytes: extra_seal1 .write_to_bytes() .expect("Failed to write seal1 to bytes"), }; assert!(node.on_peer_message(extra_seal_msg1, &mut state).is_ok()); assert!(service.was_called_with_args_once(stringify_func_call!("commit_block", vec![1]))); } /// When the whole network is starting "fresh" from a non-genesis block, none of the nodes will /// have the `Commit` messages necessary to build the consensus seal for the last committed /// block (the chain head). To bootstrap the network in this scenario, all nodes will send a /// `Commit` message for their chain head whenever one of the PBFT members connects; when /// > 2f + 1 nodes have connected and received these `Commit` messages, the nodes will be able /// to build a seal using the messages. 
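    ///
    /// Sketched with hypothetical helpers (the real logic lives in
    /// `PbftNode::on_peer_connected`; the Commit's view matches the seal of the block after the
    /// chain head, or 0 if there is none):
    ///
    /// ```ignore
    /// fn on_peer_connected(state: &PbftState, peer_id: &PeerId, service: &mut dyn Service) {
    ///     // Genesis (block 0) needs no seal, and only PBFT members participate
    ///     if state.seq_num > 1 && is_member(state, peer_id) {
    ///         let commit = make_commit(last_committed_view(state), state.seq_num - 1,
    ///                                  state.chain_head.clone());
    ///         service.send_to(peer_id, "Commit", commit);
    ///     }
    /// }
    /// ```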
#[test] #[allow(unused_must_use)] fn test_broadcast_bootstrap_commit() { // Initialize a node let (mut node, mut state, service) = mock_node(&mock_config(4), vec![0], mock_block(0)); assert_eq!(1, state.seq_num); // Verify commit isn't broadcast when chain head is block 0 (no seal needed for block) node.on_peer_connected(vec![1], &mut state); assert!(!service.was_called("send_to")); // Simulate committing block 1 node.msg_log.add_validated_block(mock_block(1)); assert!(node.on_block_commit(vec![1], &mut state).is_ok()); assert_eq!(2, state.seq_num); assert_eq!(vec![1], state.chain_head); // Verify peer connections from non-members are ignored node.on_peer_connected(vec![4], &mut state); assert!(!service.was_called("send_to")); // Verify that a Commit with view 0 is sent when chain head is block 1 assert!(node.on_peer_connected(vec![1], &mut state).is_ok()); assert!(service.was_called_with_args(stringify_func_call!( "send_to", vec![1], "Commit", mock_msg(PbftMessageType::Commit, 0, 1, vec![0], vec![1], false).message_bytes ))); // Simulate committing block 2 (with seal for block 1) let key_pairs = mock_signer_network(3); let mut block2 = mock_block(2); block2.payload = mock_seal( 1, 1, vec![1], &key_pairs[0], (1..3) .map(|i| mock_vote(PbftMessageType::Commit, 1, 1, vec![1], &key_pairs[i])) .collect::<Vec<_>>(), ) .write_to_bytes() .expect("Failed to write seal to bytes"); node.msg_log.add_validated_block(block2); assert!(node.on_block_commit(vec![2], &mut state).is_ok()); assert_eq!(3, state.seq_num); assert_eq!(vec![2], state.chain_head); // Verify that a Commit with view 1 (same as consensus seal in block 2) is sent assert!(node.on_peer_connected(vec![2], &mut state).is_ok()); assert!(service.was_called_with_args(stringify_func_call!( "send_to", vec![2], "Commit", mock_msg(PbftMessageType::Commit, 1, 2, vec![0], vec![2], false).message_bytes ))); // Verify Commit messages are sent to all peers that are already connected on node startup let peers = vec![PeerInfo { peer_id: vec![2] }, PeerInfo { peer_id: vec![3] }]; let mut state2 = PbftState::new(vec![1], 2, &mock_config(4)); let service2 = MockService::new(&mock_config(4)); let _node2 = PbftNode::new( &mock_config(4), mock_block(2), peers, Box::new(service2.clone()), &mut state2, ); assert!(service2.was_called_with_args(stringify_func_call!( "send_to", vec![2], "Commit", mock_msg(PbftMessageType::Commit, 0, 2, vec![1], vec![2], false).message_bytes ))); assert!(service2.was_called_with_args(stringify_func_call!( "send_to", vec![3], "Commit", mock_msg(PbftMessageType::Commit, 0, 2, vec![1], vec![2], false).message_bytes ))); } }
41.753031
106
0.571563
e6eb4f59b3796985a0819f3af0de31f1d67ff1d8
684
// Copyright 2019, The Gtk-rs Project Developers.
// See the COPYRIGHT file at the top-level directory of this distribution.
// Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT>

use gdk_sys;
use glib;
use glib::translate::*;
use gobject_sys;
use ContentDeserializer;

impl ContentDeserializer {
    pub fn set_value(&self, value: glib::Value) {
        assert!(value.type_() == self.get_gtype(), "Type mismatch");

        let src_value = value.to_glib_none();
        unsafe {
            // Copy the caller's value into the deserializer's internal GValue
            let dest_value = gdk_sys::gdk_content_deserializer_get_value(self.to_glib_none().0);
            gobject_sys::g_value_copy(src_value.0, dest_value);
        }
    }
}
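
// Illustrative usage sketch: `deserializer` is a placeholder for a `ContentDeserializer` whose
// `get_gtype()` is the `String` GType.
//
//     let value = glib::Value::from("hello");
//     deserializer.set_value(value); // panics if the value's type doesn't match get_gtype()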
32.571429
96
0.687135
187f544708b90bf87b68bd2a88289e709e43a493
9,644
//! Implementation of the Java Minecraft ping protocol.
//! https://wiki.vg/Server_List_Ping

use crate::{Error, Pingable};
use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
use serde::Deserialize;
use std::{
    io::{self, Cursor, Read, Write},
    net::{IpAddr, SocketAddr, TcpStream},
    time::{Duration, Instant},
};
use thiserror::Error;
use trust_dns_resolver::{config::*, Resolver};

/// Configuration for pinging a Java server.
///
/// # Examples
///
/// ```
/// use mcping::Java;
/// use std::time::Duration;
///
/// let java_config = Java {
///     server_address: "mc.hypixel.net".to_string(),
///     timeout: Some(Duration::from_secs(10)),
/// };
/// ```
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
pub struct Java {
    /// The Java server address.
    ///
    /// This can be either an IP or a hostname, and both may optionally have a
    /// port at the end.
    ///
    /// DNS resolution will be performed on hostnames.
    ///
    /// # Examples
    ///
    /// ```text
    /// test.server.com
    /// test.server.com:19384
    /// 13.212.76.209
    /// 13.212.76.209:23193
    /// ```
    pub server_address: String,
    /// The connection timeout if a connection cannot be made.
    pub timeout: Option<Duration>,
}

impl Pingable for Java {
    type Response = JavaResponse;

    fn ping(self) -> Result<(u64, Self::Response), crate::Error> {
        let mut conn = Connection::new(&self.server_address, self.timeout)?;

        // Handshake
        conn.send_packet(Packet::Handshake {
            version: 47,
            host: conn.host.clone(),
            port: conn.port,
            next_state: 1,
        })?;

        // Request
        conn.send_packet(Packet::Request {})?;

        let resp = match conn.read_packet()? {
            Packet::Response { response } => serde_json::from_str(&response)?,
            _ => return Err(Error::InvalidPacket),
        };

        // Ping Request
        let r = rand::random();
        conn.send_packet(Packet::Ping { payload: r })?;
        let before = Instant::now();
        let ping = match conn.read_packet()? {
            Packet::Pong { payload } if payload == r => {
                (Instant::now() - before).as_millis() as u64
            }
            _ => return Err(Error::InvalidPacket),
        };

        Ok((ping, resp))
    }
}

/// The server status response
///
/// More information can be found [here](https://wiki.vg/Server_List_Ping).
#[derive(Deserialize)]
pub struct JavaResponse {
    /// The version of the server.
    pub version: Version,
    /// Information about online players
    pub players: Players,
    /// The description of the server (MOTD).
    pub description: Chat,
    /// The server icon (a Base64-encoded PNG image)
    pub favicon: Option<String>,
}

/// Information about the server's version
#[derive(Deserialize)]
pub struct Version {
    /// The name of the version the server is running
    ///
    /// In practice this comes in a large variety of different formats.
    pub name: String,
    /// See https://wiki.vg/Protocol_version_numbers
    pub protocol: i64,
}

/// An online player of the server.
#[derive(Deserialize)]
pub struct Player {
    /// The name of the player.
    pub name: String,
    /// The player's UUID
    pub id: String,
}

/// The stats for players on the server.
#[derive(Deserialize)]
pub struct Players {
    /// The max amount of players.
    pub max: i64,
    /// The amount of players online.
    pub online: i64,
    /// A preview of which players are online
    ///
    /// In practice servers often don't send this, or use it for more advertising
    pub sample: Option<Vec<Player>>,
}

/// This is a partial implementation of a Minecraft chat component limited to just text
// TODO: Finish this object. 
#[derive(Deserialize)]
#[serde(untagged)]
pub enum Chat {
    Text { text: String },
    String(String),
}

impl Chat {
    pub fn text(&self) -> &str {
        match self {
            Chat::Text { text } => text.as_str(),
            Chat::String(s) => s.as_str(),
        }
    }
}

trait ReadJavaExt: Read + ReadBytesExt {
    fn read_varint(&mut self) -> io::Result<i32> {
        let mut res = 0i32;

        for i in 0..5 {
            let part = self.read_u8()?;
            res |= (part as i32 & 0x7F) << (7 * i);
            if part & 0x80 == 0 {
                return Ok(res);
            }
        }
        Err(io::Error::new(io::ErrorKind::Other, "VarInt too big!"))
    }

    fn read_string(&mut self) -> io::Result<String> {
        let len = self.read_varint()? as usize;
        let mut buf = vec![0; len];
        self.read_exact(&mut buf)?;
        Ok(String::from_utf8(buf).expect("Invalid UTF-8 String."))
    }
}

impl<T> ReadJavaExt for T where T: Read + ReadBytesExt {}

trait WriteJavaExt: Write + WriteBytesExt {
    fn write_varint(&mut self, mut val: i32) -> io::Result<()> {
        for _ in 0..5 {
            if val & !0x7F == 0 {
                self.write_u8(val as u8)?;
                return Ok(());
            }
            self.write_u8((val & 0x7F | 0x80) as u8)?;
            val >>= 7;
        }
        Err(io::Error::new(io::ErrorKind::Other, "VarInt too big!"))
    }

    fn write_string(&mut self, s: &str) -> io::Result<()> {
        self.write_varint(s.len() as i32)?;
        self.write_all(s.as_bytes())?;
        Ok(())
    }
}

impl<T> WriteJavaExt for T where T: Write + WriteBytesExt {}

#[derive(Debug, Error)]
#[error("invalid packet response `{packet:?}`")]
pub struct InvalidPacket {
    packet: Packet,
}

#[derive(Debug)]
pub(crate) enum Packet {
    Handshake {
        version: i32,
        host: String,
        port: u16,
        next_state: i32,
    },
    Response {
        response: String,
    },
    Pong {
        payload: u64,
    },
    Request {},
    Ping {
        payload: u64,
    },
}

struct Connection {
    stream: TcpStream,
    host: String,
    port: u16,
}

impl Connection {
    fn new(address: &str, timeout: Option<Duration>) -> Result<Self, Error> {
        // Split the address up into its parts, saving the host and port for later and converting the
        // potential domain into an ip
        let mut parts = address.split(':');

        let host = parts.next().ok_or(Error::InvalidAddress)?.to_string();

        // If a port exists we want to try and parse it and if not we will
        // default to 25565 (Minecraft)
        let port = if let Some(port) = parts.next() {
            port.parse::<u16>().map_err(|_| Error::InvalidAddress)?
        } else {
            25565
        };

        // Attempt to lookup the ip of the server from an srv record, falling back on the ip from a host
        let resolver = Resolver::new(ResolverConfig::default(), ResolverOpts::default()).unwrap();

        // Determine what host to lookup by doing the following:
        // - Lookup the SRV record for the domain, if it exists perform a lookup of the ip from the target
        //   and grab the port pointed at by the record.
        //
        //   Note: trust_dns_resolver should do a recursive lookup for an ip but it doesn't seem to at
        //   the moment.
        //
        // - If the above failed in any way fall back to the normal ip lookup from the host provided
        //   and use the provided port.
        let lookup_ip =
            |host: &str| -> Option<IpAddr> { resolver.lookup_ip(host).ok()?.into_iter().next() };

        let (ip, port) = resolver
            .srv_lookup(format!("_minecraft._tcp.{}.", &host))
            .ok()
            .and_then(|lookup| {
                let record = lookup.into_iter().next()?;
                let ip = lookup_ip(&record.target().to_string())?;
                Some((ip, record.port()))
            })
            .or_else(|| Some((lookup_ip(&host)?, port)))
            .ok_or(Error::DnsLookupFailed)?;

        let socket_addr = SocketAddr::new(ip, port);

        Ok(Self {
            stream: if let Some(timeout) = timeout {
                TcpStream::connect_timeout(&socket_addr, timeout)?
            } else {
                TcpStream::connect(&socket_addr)? 
}, host, port, }) } fn send_packet(&mut self, p: Packet) -> Result<(), Error> { let mut buf = Vec::new(); match p { Packet::Handshake { version, host, port, next_state, } => { buf.write_varint(0x00)?; buf.write_varint(version)?; buf.write_string(&host)?; buf.write_u16::<BigEndian>(port)?; buf.write_varint(next_state)?; } Packet::Request {} => { buf.write_varint(0x00)?; } Packet::Ping { payload } => { buf.write_varint(0x01)?; buf.write_u64::<BigEndian>(payload)?; } _ => return Err(Error::InvalidPacket), } self.stream.write_varint(buf.len() as i32)?; self.stream.write_all(&buf)?; Ok(()) } fn read_packet(&mut self) -> Result<Packet, Error> { let len = self.stream.read_varint()?; let mut buf = vec![0; len as usize]; self.stream.read_exact(&mut buf)?; let mut c = Cursor::new(buf); Ok(match c.read_varint()? { 0x00 => Packet::Response { response: c.read_string()?, }, 0x01 => Packet::Pong { payload: c.read_u64::<BigEndian>()?, }, _ => return Err(Error::InvalidPacket), }) } }
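
// Illustrative usage sketch ("mc.example.com" is a placeholder address; the `mcping::{Java,
// Pingable}` import path is assumed from this crate's public exports):
//
//     use mcping::{Java, Pingable};
//     use std::time::Duration;
//
//     let (latency_ms, status) = Java {
//         server_address: "mc.example.com".to_string(),
//         timeout: Some(Duration::from_secs(5)),
//     }
//     .ping()
//     .expect("failed to ping the server");
//     println!("{} ({} ms)", status.description.text(), latency_ms);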
29.048193
106
0.549876
0eac74f9db953eec91f5fbb59f6dd321653fa2c5
23,193
use super::super::bn256::check_curve_init; use super::super::mcl::{ mclBnG1_add, mclBnG1_dbl, mclBnG1_neg, mclBnG1_normalize, Fp as mcl_fq, G1 as mcl_g1, }; use super::g2::G2Affine; use super::{Bn256, Fq, Fq12, FqRepr, Fr, FrRepr}; use crate::{CurveAffine, CurveProjective, EncodedPoint, Engine, GroupDecodingError, RawEncodable}; use ff::{BitIterator, Field, PrimeField, PrimeFieldRepr, SqrtField}; use crate::mcl::{mclBnFp_add, mclBnFp_inv, mclBnFp_sqr}; use rand::{Rand, Rng}; use std::fmt; use std::ops::{AddAssign, MulAssign, SubAssign}; #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub struct G1Affine { pub(crate) x: Fq, pub(crate) y: Fq, pub(crate) infinity: bool, } impl ::std::fmt::Display for G1Affine { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { if self.infinity { write!(f, "{}(Infinity)", "G1") } else { write!(f, "{}(x={}, y={})", "G1", self.x, self.y) } } } #[derive(Copy, Clone, Debug, Eq)] pub struct G1(pub(crate) mcl_g1); impl ::std::fmt::Display for G1 { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { write!(f, "{}", self.into_affine()) } } impl PartialEq for G1 { fn eq(&self, other: &G1) -> bool { self.0.eq(&other.0) } } impl G1Affine { fn mul_bits<S: AsRef<[u64]>>(&self, bits: BitIterator<S>) -> G1 { let mut res = G1::zero(); for i in bits { res.double(); if i { res.add_assign_mixed(self) } } res } /// Attempts to construct an affine point given an x-coordinate. The /// point is not guaranteed to be in the prime order subgroup. /// /// If and only if `greatest` is set will the lexicographically /// largest y-coordinate be selected. fn get_point_from_x(x: Fq, greatest: bool) -> Option<G1Affine> { // Compute x^3 + b let mut x3b = x; x3b.square(); x3b.mul_assign(&x); x3b.add_assign(&G1Affine::get_coeff_b()); x3b.sqrt().map(|y| { let mut negy = y; negy.negate(); G1Affine { x: x, y: if (y < negy) ^ greatest { y } else { negy }, infinity: false, } }) } fn is_on_curve(&self) -> bool { if self.is_zero() { true } else { // Check that the point is on the curve let mut y2 = self.y; y2.square(); let mut x3b = self.x; x3b.square(); x3b.mul_assign(&self.x); x3b.add_assign(&Self::get_coeff_b()); y2 == x3b } } } impl CurveAffine for G1Affine { type Engine = Bn256; type Scalar = Fr; type Base = Fq; type Prepared = G1Prepared; type Projective = G1; type Uncompressed = G1Uncompressed; type Compressed = G1Compressed; type Pair = G2Affine; type PairingResult = Fq12; fn zero() -> Self { G1Affine { x: Fq::zero(), y: Fq::one(), infinity: true, } } fn one() -> Self { Self::get_generator() } fn is_zero(&self) -> bool { self.infinity } fn mul<S: Into<<Self::Scalar as PrimeField>::Repr>>(&self, by: S) -> G1 { let bits = BitIterator::new(by.into()); self.mul_bits(bits) } fn negate(&mut self) { if !self.is_zero() { self.y.negate(); } } fn prepare(&self) -> Self::Prepared { G1Prepared::from_affine(*self) } fn pairing_with(&self, other: &Self::Pair) -> Self::PairingResult { self.perform_pairing(other) } fn into_projective(&self) -> G1 { (*self).into() } #[inline(always)] fn as_xy(&self) -> (&Self::Base, &Self::Base) { (&self.x, &self.y) } #[inline(always)] fn into_xy_unchecked(self) -> (Self::Base, Self::Base) { (self.x, self.y) } #[inline(always)] fn from_xy_unchecked(x: Self::Base, y: Self::Base) -> Self { let infinity = x.is_zero() && y.is_zero(); Self { x: x, y: y, infinity, } } fn from_xy_checked(x: Self::Base, y: Self::Base) -> Result<Self, GroupDecodingError> { let infinity = x.is_zero() && y.is_zero(); let affine = Self { x: x, y: y, infinity, }; if 
!affine.is_on_curve() { Err(GroupDecodingError::NotOnCurve) } else { Ok(affine) } } fn a_coeff() -> Self::Base { Self::Base::zero() } fn b_coeff() -> Self::Base { G1Affine::get_coeff_b() } } impl CurveProjective for G1 { type Engine = Bn256; type Scalar = Fr; type Base = Fq; type Affine = G1Affine; // The point at infinity is always represented by // Z = 0. fn zero() -> Self { check_curve_init(); Self(mcl_g1::zero()) } fn one() -> Self { G1Affine::one().into() } // The point at infinity is always represented by // Z = 0. fn is_zero(&self) -> bool { check_curve_init(); self.0.is_zero() } fn is_normalized(&self) -> bool { self.is_zero() || self.0.z.is_one() } //TODO:: not change now fn batch_normalization(v: &mut [Self]) { // Montgomery’s Trick and Fast Implementation of Masked AES // Genelle, Prouff and Quisquater // Section 3.2 // First pass: compute [a, ab, abc, ...] let mut prod = Vec::with_capacity(v.len()); let mut tmp = mcl_fq::one(); check_curve_init(); for g in v .iter_mut() // Ignore normalized elements .filter(|g| !g.is_normalized()) { tmp.mul_assign(&g.0.z); prod.push(tmp); } // Invert `tmp`. unsafe { mclBnFp_inv(&mut tmp, &tmp) }; // Guaranteed to be nonzero. // Second pass: iterate backwards to compute inverses for (g, s) in v .iter_mut() // Backwards .rev() // Ignore normalized elements .filter(|g| !g.is_normalized()) // Backwards, skip last element, fill in one for last term. .zip(prod.into_iter().rev().skip(1).chain(Some(mcl_fq::one()))) { // tmp := tmp * g.z; g.z := tmp * s = 1/z let mut newtmp = tmp; newtmp.mul_assign(&g.0.z); g.0.z = tmp; g.0.z.mul_assign(&s); tmp = newtmp; } // Perform affine transformations for g in v.iter_mut().filter(|g| !g.is_normalized()) { let mut z = g.0.z; // 1/z unsafe { mclBnFp_sqr(&mut z, &z) }; g.0.x.mul_assign(&z); // x/z^2 z.mul_assign(&g.0.z); // 1/z^3 g.0.y.mul_assign(&z); // y/z^3 g.0.z = mcl_fq::one(); // z = 1 } } fn double(&mut self) { check_curve_init(); unsafe { mclBnG1_dbl(&mut self.0, &self.0) }; } fn add_assign(&mut self, other: &Self) { check_curve_init(); unsafe { mclBnG1_add(&mut self.0, &self.0, &other.0) }; } //TODO: not change now fn add_assign_mixed(&mut self, other: &Self::Affine) { if other.is_zero() { return; } if self.is_zero() { self.0.x = other.x.0; self.0.y = other.y.0; self.0.z = mcl_fq::one(); return; } // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-madd-2007-bl // Z1Z1 = Z1^2 let mut z1z1 = self.0.z; unsafe { mclBnFp_sqr(&mut z1z1, &z1z1) }; // U2 = X2*Z1Z1 let mut u2 = other.x.0; u2.mul_assign(&z1z1); // S2 = Y2*Z1*Z1Z1 let mut s2 = other.y.0; s2.mul_assign(&self.0.z); s2.mul_assign(&z1z1); if self.0.x == u2 && self.0.y == s2 { // The two points are equal, so we double. self.double(); } else { // If we're adding -a and a together, self.z becomes zero as H becomes zero. 
// H = U2-X1 let mut h = u2; h.sub_assign(&self.0.x); // HH = H^2 let mut hh = h; unsafe { mclBnFp_sqr(&mut hh, &hh) }; // I = 4*HH let mut i = hh; unsafe { mclBnFp_add(&mut i, &i, &i); mclBnFp_add(&mut i, &i, &i); } // J = H*I let mut j = h; j.mul_assign(&i); // r = 2*(S2-Y1) let mut r = s2; r.sub_assign(&self.0.y); unsafe { mclBnFp_add(&mut r, &r, &r) }; // V = X1*I let mut v = self.0.x; v.mul_assign(&i); // X3 = r^2 - J - 2*V self.0.x = r; unsafe { mclBnFp_sqr(&mut self.0.x, &self.0.x) }; self.0.x.sub_assign(&j); self.0.x.sub_assign(&v); self.0.x.sub_assign(&v); // Y3 = r*(V-X3)-2*Y1*J j.mul_assign(&self.0.y); // J = 2*Y1*J unsafe { mclBnFp_add(&mut j, &j, &j) }; self.0.y = v; self.0.y.sub_assign(&self.0.x); self.0.y.mul_assign(&r); self.0.y.sub_assign(&j); // Z3 = (Z1+H)^2-Z1Z1-HH self.0.z.add_assign(&h); unsafe { mclBnFp_sqr(&mut self.0.z, &self.0.z) }; self.0.z.sub_assign(&z1z1); self.0.z.sub_assign(&hh); } } fn negate(&mut self) { check_curve_init(); unsafe { mclBnG1_neg(&mut self.0, &self.0) }; } // TODO: not change now fn mul_assign<S: Into<<Self::Scalar as PrimeField>::Repr>>(&mut self, other: S) { let mut res = Self::zero(); let mut found_one = false; for i in BitIterator::new(other.into()) { if found_one { res.double(); } else { found_one = i; } if i { res.add_assign(self); } } *self = res; } fn into_affine(&self) -> G1Affine { (*self).into() } fn recommended_wnaf_for_scalar(scalar: <Self::Scalar as PrimeField>::Repr) -> usize { Self::empirical_recommended_wnaf_for_scalar(scalar) } fn recommended_wnaf_for_num_scalars(num_scalars: usize) -> usize { Self::empirical_recommended_wnaf_for_num_scalars(num_scalars) } fn as_xyz(&self) -> (&Self::Base, &Self::Base, &Self::Base) { unimplemented!("not implement for projective"); } fn into_xyz_unchecked(self) -> (Self::Base, Self::Base, Self::Base) { (Fq(self.0.x), Fq(self.0.y), Fq(self.0.z)) } fn from_xyz_unchecked(x: Self::Base, y: Self::Base, z: Self::Base) -> Self { Self(mcl_g1 { x: x.0, y: y.0, z: z.0, }) } fn from_xyz_checked( _x: Self::Base, _y: Self::Base, _z: Self::Base, ) -> Result<Self, GroupDecodingError> { unimplemented!("on curve check is not implemented for projective") } } // The affine point X, Y is represented in the jacobian // coordinates with Z = 1. impl From<G1Affine> for G1 { fn from(p: G1Affine) -> G1 { if p.is_zero() { G1::zero() } else { G1(mcl_g1 { x: p.x.0, y: p.y.0, z: Fq::one().0, }) } } } // The projective point X, Y, Z is represented in the affine // coordinates as X/Z^2, Y/Z^3. impl From<G1> for G1Affine { fn from(p: G1) -> G1Affine { if p.is_zero() { G1Affine::zero() } else if p.0.z.is_one() { // If Z is one, the point is already normalized. 
G1Affine { x: Fq(p.0.x), y: Fq(p.0.y), infinity: false, } } else { let mut res = unsafe { mcl_g1::uninit() }; unsafe { mclBnG1_normalize(&mut res, &p.0) }; G1Affine { x: Fq(res.x), y: Fq(res.y), infinity: false, } } } } impl RawEncodable for G1Affine { fn into_raw_uncompressed_le(&self) -> Self::Uncompressed { let mut res = Self::Uncompressed::empty(); { let mut writer = &mut res.0[..]; self.x.into_raw_repr().write_le(&mut writer).unwrap(); self.y.into_raw_repr().write_le(&mut writer).unwrap(); } res } /// Creates a point from raw encoded coordinates without checking on curve fn from_raw_uncompressed_le_unchecked( encoded: &Self::Uncompressed, _infinity: bool, ) -> Result<Self, GroupDecodingError> { let copy = encoded.0; if copy.iter().all(|b| *b == 0) { return Ok(Self::zero()); } let mut x = FqRepr([0; 4]); let mut y = FqRepr([0; 4]); { let mut reader = &copy[..]; x.read_le(&mut reader).unwrap(); y.read_le(&mut reader).unwrap(); } Ok(G1Affine { x: Fq::from_raw_repr(x) .map_err(|e| GroupDecodingError::CoordinateDecodingError("x coordinate", e))?, y: Fq::from_raw_repr(y) .map_err(|e| GroupDecodingError::CoordinateDecodingError("y coordinate", e))?, infinity: false, }) } fn from_raw_uncompressed_le( encoded: &Self::Uncompressed, _infinity: bool, ) -> Result<Self, GroupDecodingError> { let affine = Self::from_raw_uncompressed_le_unchecked(&encoded, _infinity)?; if !affine.is_on_curve() { Err(GroupDecodingError::NotOnCurve) } else { Ok(affine) } } } #[derive(Copy, Clone)] pub struct G1Uncompressed([u8; 64]); impl Rand for G1 { fn rand<R: Rng>(rng: &mut R) -> Self { loop { let x = rng.gen(); let greatest = rng.gen(); if let Some(p) = G1Affine::get_point_from_x(x, greatest) { if !p.is_zero() { if p.is_on_curve() { return p.into_projective(); } } } } } } impl Rand for G1Affine { fn rand<R: Rng>(rng: &mut R) -> Self { loop { let x = rng.gen(); let greatest = rng.gen(); if let Some(p) = G1Affine::get_point_from_x(x, greatest) { if !p.is_zero() { if p.is_on_curve() { return p; } } } } } } impl AsRef<[u8]> for G1Uncompressed { fn as_ref(&self) -> &[u8] { &self.0 } } impl AsMut<[u8]> for G1Uncompressed { fn as_mut(&mut self) -> &mut [u8] { &mut self.0 } } impl fmt::Debug for G1Uncompressed { fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> { self.0[..].fmt(formatter) } } impl EncodedPoint for G1Uncompressed { type Affine = G1Affine; fn empty() -> Self { G1Uncompressed([0; 64]) } fn size() -> usize { 64 } fn into_affine(&self) -> Result<G1Affine, GroupDecodingError> { let affine = self.into_affine_unchecked()?; if !affine.is_on_curve() { Err(GroupDecodingError::NotOnCurve) } else { Ok(affine) } } fn into_affine_unchecked(&self) -> Result<G1Affine, GroupDecodingError> { // Create a copy of this representation. let mut copy = self.0; if copy[0] & (1 << 6) != 0 { // This is the point at infinity, which means that if we mask away // the first two bits, the entire representation should consist // of zeroes. copy[0] &= 0x3f; if copy.iter().all(|b| *b == 0) { Ok(G1Affine::zero()) } else { Err(GroupDecodingError::UnexpectedInformation) } } else { if copy[0] & (1 << 7) != 0 { // The bit indicating the y-coordinate should be lexicographically // largest is set, but this is an uncompressed element. return Err(GroupDecodingError::UnexpectedInformation); } // Unset the two most significant bits. 
copy[0] &= 0x3f; let mut x = FqRepr([0; 4]); let mut y = FqRepr([0; 4]); { let mut reader = &copy[..]; x.read_be(&mut reader).unwrap(); y.read_be(&mut reader).unwrap(); } Ok(G1Affine { x: Fq::from_repr(x) .map_err(|e| GroupDecodingError::CoordinateDecodingError("x coordinate", e))?, y: Fq::from_repr(y) .map_err(|e| GroupDecodingError::CoordinateDecodingError("y coordinate", e))?, infinity: false, }) } } fn from_affine(affine: G1Affine) -> Self { let mut res = Self::empty(); if affine.is_zero() { // Set the second-most significant bit to indicate this point // is at infinity. res.0[0] |= 1 << 6; } else { let mut writer = &mut res.0[..]; affine.x.into_repr().write_be(&mut writer).unwrap(); affine.y.into_repr().write_be(&mut writer).unwrap(); } res } } #[derive(Copy, Clone)] pub struct G1Compressed([u8; 32]); impl AsRef<[u8]> for G1Compressed { fn as_ref(&self) -> &[u8] { &self.0 } } impl AsMut<[u8]> for G1Compressed { fn as_mut(&mut self) -> &mut [u8] { &mut self.0 } } impl fmt::Debug for G1Compressed { fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> { self.0[..].fmt(formatter) } } impl EncodedPoint for G1Compressed { type Affine = G1Affine; fn empty() -> Self { G1Compressed([0; 32]) } fn size() -> usize { 32 } fn into_affine(&self) -> Result<G1Affine, GroupDecodingError> { let affine = self.into_affine_unchecked()?; // NB: Decompression guarantees that it is on the curve already. Ok(affine) } fn into_affine_unchecked(&self) -> Result<G1Affine, GroupDecodingError> { // Create a copy of this representation. let mut copy = self.0; if copy[0] & (1 << 6) != 0 { // This is the point at infinity, which means that if we mask away // the first two bits, the entire representation should consist // of zeroes. copy[0] &= 0x3f; if copy.iter().all(|b| *b == 0) { Ok(G1Affine::zero()) } else { Err(GroupDecodingError::UnexpectedInformation) } } else { // Determine if the intended y coordinate must be greater // lexicographically. let greatest = copy[0] & (1 << 7) != 0; // Unset the two most significant bits. copy[0] &= 0x3f; let mut x = FqRepr([0; 4]); { let mut reader = &copy[..]; x.read_be(&mut reader).unwrap(); } // Interpret as Fq element. let x = Fq::from_repr(x) .map_err(|e| GroupDecodingError::CoordinateDecodingError("x coordinate", e))?; G1Affine::get_point_from_x(x, greatest).ok_or(GroupDecodingError::NotOnCurve) } } fn from_affine(affine: G1Affine) -> Self { let mut res = Self::empty(); if affine.is_zero() { // Set the second-most significant bit to indicate this point // is at infinity. res.0[0] |= 1 << 6; } else { { let mut writer = &mut res.0[..]; affine.x.into_repr().write_be(&mut writer).unwrap(); } let mut negy = affine.y; negy.negate(); // Set the third most significant bit if the correct y-coordinate // is lexicographically largest. 
if affine.y > negy { res.0[0] |= 1 << 7; } } res } } impl G1Affine { // fn scale_by_cofactor(&self) -> G1 { // self.into_projective() // } fn get_generator() -> Self { G1Affine { x: super::fq::G1_GENERATOR_X, y: super::fq::G1_GENERATOR_Y, infinity: false, } } fn get_coeff_b() -> Fq { super::fq::B_COEFF } fn perform_pairing(&self, other: &G2Affine) -> Fq12 { super::Bn256::pairing(*self, *other) } } impl G1 { fn empirical_recommended_wnaf_for_scalar(scalar: FrRepr) -> usize { let num_bits = scalar.num_bits() as usize; if num_bits >= 130 { 4 } else if num_bits >= 34 { 3 } else { 2 } } fn empirical_recommended_wnaf_for_num_scalars(num_scalars: usize) -> usize { const RECOMMENDATIONS: [usize; 12] = [1, 3, 7, 20, 43, 120, 273, 563, 1630, 3128, 7933, 62569]; let mut ret = 4; for r in &RECOMMENDATIONS { if num_scalars > *r { ret += 1; } else { break; } } ret } } #[derive(Clone, Debug)] pub struct G1Prepared(pub(crate) G1Affine); impl G1Prepared { pub fn is_zero(&self) -> bool { self.0.is_zero() } pub fn from_affine(p: G1Affine) -> Self { G1Prepared(p) } } #[test] fn g1_generator() { use SqrtField; let mut x = Fq::zero(); let mut i = 0; loop { // y^2 = x^3 + b let mut rhs = x; rhs.square(); rhs.mul_assign(&x); rhs.add_assign(&G1Affine::get_coeff_b()); if let Some(y) = rhs.sqrt() { let yrepr = y.into_repr(); let mut negy = y; negy.negate(); let negyrepr = negy.into_repr(); let p = G1Affine { x: x, y: if yrepr < negyrepr { y } else { negy }, infinity: false, }; let g1 = p.into_projective(); if !g1.is_zero() { assert_eq!(i, 1); let g1 = G1Affine::from(g1); assert_eq!(g1, G1Affine::one()); break; } } i += 1; x.add_assign(&Fq::one()); } } #[test] fn test_base_point_addition_and_doubling() { let mut a = G1::one(); print!("{}\n\n", a); a.add_assign(&G1::one()); print!("{}\n\n", a); } #[test] fn g1_curve_tests() { crate::tests::curve::curve_tests::<G1>(); crate::tests::curve::random_transformation_tests::<G1>(); }
26.147689
98
0.491657
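A note on the G1 encoding code in the record above: both encodings reserve two flag bits in the leading byte, bit 6 for the point at infinity and bit 7 for the sign of y in the compressed form, which works because the field modulus here fits below 2^254. A minimal sketch of a test exercising that convention, using only the APIs defined in the file (the test itself is illustrative, not part of the original source):

#[test]
fn compressed_flag_bits_sketch() {
    // The point at infinity encodes as all zeroes plus the infinity flag (bit 6).
    let inf = G1Compressed::from_affine(G1Affine::zero());
    assert_eq!(inf.as_ref()[0] & (1 << 6), 1 << 6);
    assert!(inf.as_ref()[1..].iter().all(|b| *b == 0));

    // A finite point round-trips: decoding masks both flags off before reading
    // x big-endian, then recovers y from the sign bit via get_point_from_x.
    let g = G1Compressed::from_affine(G1Affine::one());
    assert_eq!(g.as_ref()[0] & (1 << 6), 0);
    assert_eq!(g.into_affine().unwrap(), G1Affine::one());
}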
4a7203b9fdcb31cbc6968027071774666accc11a
22,160
// Copyright 2019 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::lexer::Lexer; use crate::lexer::SourceLocation; use crate::lexer::SourcePosition; use crate::lexer::Token; use crate::lexer::TokenPosition; use crate::lexer::TokenType; pub use crate::parser::expression::*; pub use crate::parser::if_statement::ElseCond; pub use crate::parser::if_statement::IfStatement; pub use crate::parser::let_statement::LetStatement; use crate::parser::return_statement::ReturnStatement; use crate::parser::set_statement::SetStatement; use crate::parser::try_statement::TryStatement; use crate::parser::while_statement::WhileStatement; use serde_json::json; pub mod expression; mod if_statement; mod let_statement; mod return_statement; mod set_statement; mod try_statement; mod while_statement; use std::iter::Iterator; use std::iter::Peekable; #[derive(PartialEq, Debug)] pub struct Program { pub statements: Vec<Statement>, } impl Program { pub fn dump_for_testing(&self) -> serde_json::Value { return json!(self .statements .iter() .map(|s| s.dump_for_testing()) .collect::<Vec<serde_json::Value>>()); } } #[derive(PartialEq, Debug)] pub enum Statement { Let(LetStatement), Call(CallStatement), Execute(ExecuteStatement), Return(ReturnStatement), If(IfStatement), While(WhileStatement), Function(FunctionStatement), For(ForStatement), Try(TryStatement), Set(SetStatement), Break(BreakStatement), } impl Statement { pub fn dump_for_testing(&self) -> serde_json::Value { return match &self { Statement::Let(x) => json!({ "let": x.dump_for_testing() }), Statement::If(x) => json!({ "if": x.dump_for_testing() }), Statement::Call(x) => json!({ "call": x.dump_for_testing() }), Statement::Return(x) => json!({ "return": x.dump_for_testing() }), Statement::While(x) => json!({ "while": x.dump_for_testing() }), Statement::Function(x) => json!({ "function": x.dump_for_testing() }), Statement::Try(x) => json!({ "try": x.dump_for_testing() }), Statement::Set(x) => json!({ "set": x.dump_for_testing() }), Statement::Break(x) => json!({ "break": x.dump_for_testing() }), _ => json!({}), }; } } #[derive(PartialEq, Debug)] pub struct CallStatement { name: String, pub arguments: Vec<Expression>, } impl CallStatement { pub fn dump_for_testing(&self) -> serde_json::Value { return json!({ "method": self.name, "arguments": self.arguments.iter().map(|s| s.dump_for_testing()).collect::<Vec<serde_json::Value>>(), }); } } #[derive(PartialEq, Debug)] pub struct BreakStatement {} impl BreakStatement { pub fn dump_for_testing(&self) -> serde_json::Value { return json!({}); } } #[derive(PartialEq, Debug)] pub struct ExecuteStatement { arguments: Vec<Expression>, } #[derive(PartialEq, Debug)] pub struct FunctionStatement { pub name: String, // TODO change to list of tokens? arguments: Vec<String>, pub body: Vec<Statement>, // true if 'function!' 
overwrite: bool, abort: bool, } impl FunctionStatement { pub fn dump_for_testing(&self) -> serde_json::Value { return json!({ "name": self.name, "arguments": self.arguments, "body": self.body.iter().map(|s| s.dump_for_testing()).collect::<Vec<serde_json::Value>>(), "overwrite": self.overwrite, "abort": self.abort, }); } } #[derive(PartialEq, Debug)] pub struct ForStatement { loop_variable: LoopVariable, range: Expression, body: Vec<Statement>, } #[derive(PartialEq, Debug)] pub enum LoopVariable { Single(String), List(Vec<String>), } #[derive(PartialEq, Debug)] pub struct ParseError { pub message: String, pub position: TokenPosition, } pub struct Parser<'a> { pub l: Lexer<'a>, tokens: Vec<Token>, lexer: Peekable<std::vec::IntoIter<Token>>, pub errors: Vec<ParseError>, } impl<'a> Parser<'a> { pub fn new(mut lexer: Lexer<'a>) -> Parser { let tokens = lexer.lex(); return Parser { l: lexer, tokens: tokens.clone(), lexer: tokens.into_iter().peekable(), errors: Vec::new(), }; } pub fn parse(&mut self) -> Program { let mut statements = Vec::new(); while self.lexer.peek() != None { if let Some(stmt) = self.parse_statement() { statements.push(stmt); } } return Program { statements: statements, }; } pub fn resolve_location(&self, loc: SourceLocation) -> TokenPosition { self.l.token_position(&loc) } pub fn find_token(&self, pos: SourcePosition) -> Result<Token, ()> { // TODO: This is very naive implementation, we can do a lot of optimizations here. for token in &self.tokens { let token_pos = self.resolve_location(token.location.clone()); if token_pos.start <= pos && pos <= token_pos.end { return Ok(token.clone()); } } Err(()) } // Parses a statement, including the new line at the end of statement. // Returns None when statement failed to parse. fn parse_statement(&mut self) -> Option<Statement> { let token = self.lexer.next()?; match token.token_type { TokenType::Let => { if let Some(stmt) = self.parse_let_statement() { return Some(Statement::Let(stmt)); } } TokenType::Break => { self.expect_end_of_statement()?; return Some(Statement::Break(BreakStatement {})); } TokenType::Call => { if let Some(stmt) = self.parse_call_statement() { return Some(Statement::Call(stmt)); } } TokenType::Return => { if let Some(stmt) = return_statement::parse(self) { return Some(Statement::Return(stmt)); } } TokenType::Try => { if let Some(stmt) = try_statement::parse(self) { return Some(Statement::Try(stmt)); } } TokenType::Set => { if let Some(stmt) = set_statement::parse(self) { return Some(Statement::Set(stmt)); } } TokenType::Execute => return self.parse_execute_statement(), TokenType::If => { if let Some(stmt) = self.parse_if_statement() { return Some(Statement::If(stmt)); } } TokenType::Function => { if let Some(stmt) = self.parse_function_statement() { return Some(Statement::Function(stmt)); } } TokenType::For => { if let Some(stmt) = self.parse_for_statement() { return Some(Statement::For(stmt)); } } TokenType::While => { if let Some(stmt) = while_statement::parse(self) { return Some(Statement::While(stmt)); } } TokenType::NewLine => {} TokenType::Pipe => {} _ => { self.errors.push(ParseError { message: format!("expected keyword, found {}", self.token_text(&token)), position: self.l.token_position(&token.location), }); self.consume_until_end_of_statement(); } } return None; } fn parse_call_statement(&mut self) -> Option<CallStatement> { let name = self.expect_identifier()?; self.expect_token(TokenType::LeftParenthesis)?; let arguments = self.parse_list(|p| p.parse_expression(), TokenType::RightParenthesis)?; 
return Some(CallStatement { name: name, arguments: arguments, }); } pub fn end_of_statement_token(token: TokenType) -> bool { return token == TokenType::NewLine || token == TokenType::Eof || token == TokenType::Pipe; } fn parse_execute_statement(&mut self) -> Option<Statement> { let mut arguments = Vec::new(); while !Parser::end_of_statement_token(self.peek_token().token_type) { arguments.push(self.parse_expression()?); } return Some(Statement::Execute(ExecuteStatement { arguments: arguments, })); } // Let = 'let' VarName = Expression (NewLine | EOF) fn parse_let_statement(&mut self) -> Option<LetStatement> { return let_statement::parse(self); } fn consume_until_end_of_statement(&mut self) { loop { match self.lexer.next() { None => break, Some(token) => { if Parser::end_of_statement_token(token.token_type) { break; } } } } } pub fn token_text(&self, token: &Token) -> String { match token.token_type { TokenType::NewLine => "new line".to_string(), _ => format!("`{}`", self.l.token_text(&token.location).to_string()), } } // Precondition - if was already read. // // If ::= 'if' Expression NewLine Statement* 'endif' fn parse_if_statement(&mut self) -> Option<IfStatement> { return if_statement::parse(self); } fn parse_for_statement(&mut self) -> Option<ForStatement> { let loop_variable = self.parse_loop_variable()?; self.expect_token(TokenType::In)?; let range = self.parse_expression()?; self.expect_end_of_statement()?; let statements = self.parse_statements_until(TokenType::EndFor)?; Some(ForStatement { loop_variable: loop_variable, range: range, body: statements, }) } fn parse_loop_variable(&mut self) -> Option<LoopVariable> { let token = self.peek_token(); match token.token_type { TokenType::LeftBracket => self.parse_list_loop_variable(), TokenType::Ident => Some(LoopVariable::Single(self.expect_identifier()?)), _ => { self.error_and_recover("`(` or identifier", token); None } } } fn parse_list_loop_variable(&mut self) -> Option<LoopVariable> { self.expect_token(TokenType::LeftBracket)?; let vars = self.parse_list(|p| p.expect_identifier(), TokenType::RightBracket)?; return Some(LoopVariable::List(vars)); } // Parses statements until the next statement starts with given token or EOF is encountered. fn parse_statements_until(&mut self, token_type: TokenType) -> Option<Vec<Statement>> { let mut stmts = Vec::new(); while self.peek_token().token_type != TokenType::Eof && self.peek_token().token_type != token_type { // TODO: It would be nice to pass the expected token here, so that error message can // include it as well. 
if let Some(stmt) = self.parse_statement() { stmts.push(stmt); } } self.expect_token(token_type)?; self.expect_end_of_statement()?; return Some(stmts); } fn parse_function_statement(&mut self) -> Option<FunctionStatement> { let mut abort = false; let mut overwrite = false; if self.peek_token().token_type == TokenType::Bang { self.advance(); overwrite = true; } let name = self.expect_identifier()?; self.expect_token(TokenType::LeftParenthesis)?; let arguments = self.parse_list(|p| p.expect_identifier(), TokenType::RightParenthesis)?; if self.peek_token().token_type == TokenType::Abort { self.advance(); abort = true; } self.expect_end_of_statement()?; let body = self.parse_statements_until(TokenType::EndFunction)?; return Some(FunctionStatement { name: name, arguments: arguments, body: body, abort: abort, overwrite: overwrite, }); } // Number ::= 0 | [1-9][0-9]* // StringLiteral ::= '.*' // Expression = fn parse_expression(&mut self) -> Option<Expression> { return expression::parse(self); } // parse_list(|p| {p.parse_expression()}, TokenType::RightParenthesis) pub fn parse_list<F, T>(&mut self, mut f: F, end: TokenType) -> Option<Vec<T>> where F: FnMut(&mut Parser) -> Option<T>, { let mut result = Vec::new(); let token = self.peek_token(); if token.token_type == end { self.advance(); } else { result.push(f(self)?); loop { let token = self.peek_token(); match token.token_type { x if x == end => { self.advance(); break; } TokenType::Comma => { self.advance(); // TODO: should this be optional? It is required for dictionary literals // (which can have trailing comma), but not sure about other statements / // expressions. if self.peek_token().token_type == end { self.advance(); break; } result.push(f(self)?); } _ => { // TODO: use end instead of `)` self.error_and_recover("`,` or `)`", token); return None; } } } } return Some(result); } fn expect_end_of_statement(&mut self) -> Option<()> { let token = self.peek_token(); if Parser::end_of_statement_token(token.token_type) { self.advance(); return Some(()); } self.error_and_recover("new line", token); return None; } fn expect_token(&mut self, token_type: TokenType) -> Option<()> { let token = self.peek_token(); if token.token_type == token_type { self.advance(); return Some(()); } self.error_and_recover(token_type.as_str(), token); return None; } pub fn error_and_recover(&mut self, expected: &str, found: Token) { self.errors.push(ParseError { message: format!("expected {}, found {}", expected, self.token_text(&found)), position: self.l.token_position(&found.location), }); self.consume_until_end_of_statement(); } // If peek is identifier, returns name and advances. // Otherwise, consume until end of statement. 
fn expect_identifier(&mut self) -> Option<String> { let token = self.peek_token(); let name = match token.token_type { TokenType::Ident => self.identifier_name(&token), _ => { self.error_and_recover("identifier", token); return None; } }; self.advance(); Some(name) } pub fn identifier_name(&self, token: &Token) -> String { return self.l.token_text(&token.location).to_string(); } pub fn advance(&mut self) { self.lexer.next(); } pub fn peek_token(&mut self) -> Token { match self.lexer.peek() { Some(token) => token.clone(), None => self.l.eof_token(), } } } #[cfg(test)] mod tests { use super::*; use crate::lexer::SourcePosition; use pretty_assertions::assert_eq; #[test] fn returns_one_error_per_line() { let mut parser = Parser::new(Lexer::new("unknown xx()")); parser.parse(); assert_eq!( parser.errors, &[ParseError { message: "expected keyword, found `unknown`".to_string(), position: TokenPosition { start: SourcePosition { line: 0, character: 0, }, end: SourcePosition { line: 0, character: 7, }, } }] ); } // #[test] // fn parses_call_statements() { // let mut parser = Parser::new(Lexer::new("call func(l:a, l:b)")); // let program = parser.parse(); // assert_eq!(parser.errors, &[]); // assert_eq!( // program.statements, // &[Statement::Call(CallStatement { // name: "func".to_string(), // arguments: vec![ // Expression::Identifier(IdentifierExpression { // name: "l:a".to_string() // }), // Expression::Identifier(IdentifierExpression { // name: "l:b".to_string() // }) // ], // })] // ); // } // #[test] // fn parses_execute_statements() { // let mut parser = Parser::new(Lexer::new("execute l:a l:b . l:c")); // let program = parser.parse(); // assert_eq!(parser.errors, &[]); // assert_eq!( // program.statements, // &[Statement::Execute(ExecuteStatement { // arguments: vec![ // Expression::Identifier(IdentifierExpression { // name: "l:a".to_string() // }), // Expression::Infix(InfixExpression { // left: Box::new(Expression::Identifier(IdentifierExpression { // name: "l:b".to_string() // })), // operator: TokenType::Dot, // right: Box::new(Expression::Identifier(IdentifierExpression { // name: "l:c".to_string() // })), // }) // ], // })] // ); // } #[test] fn parses_function_statement() { let mut parser = Parser::new(Lexer::new( " function! 
my#method(arg1, arg2) abort call guess() endfunction ", )); let program = parser.parse(); assert_eq!(parser.errors, &[]); assert_eq!( program.statements, &[Statement::Function(FunctionStatement { name: "my#method".to_string(), arguments: vec!["arg1".to_string(), "arg2".to_string()], body: vec![Statement::Call(CallStatement { name: "guess".to_string(), arguments: vec![], })], overwrite: true, abort: true, })] ); } // #[test] // fn parses_for_statement_with_one_variable() { // let mut parser = Parser::new(Lexer::new( // " // for item in copy(mylist) // call guess() // endfor // ", // )); // let program = parser.parse(); // assert_eq!(parser.errors, &[]); // assert_eq!( // program.statements, // &[Statement::For(ForStatement { // loop_variable: LoopVariable::Single("item".to_string()), // range: Expression::Function(FunctionExpression { // name: "copy".to_string(), // arguments: vec![Expression::Identifier(IdentifierExpression { // name: "mylist".to_owned(), // })], // }), // body: vec![Statement::Call(CallStatement { // name: "guess".to_string(), // arguments: vec![], // })], // })] // ); // } #[test] fn parses_for_statement_with_multiple_variables() { let mut parser = Parser::new(Lexer::new( " for [a1, a2, a3] in copy(mylist) call guess() endfor ", )); let program = parser.parse(); assert_eq!(parser.errors, &[]); assert_eq!(program.statements.len(), 1); let for_stmt = match &program.statements[0] { Statement::For(stmt) => stmt, stmt => panic!(format!("expected for statement, got {:?}", stmt)), }; assert_eq!( for_stmt.loop_variable, LoopVariable::List(vec!["a1".to_string(), "a2".to_string(), "a3".to_string()]) ); match &for_stmt.range { Expression::Function(_) => {} expr => panic!(format!("expected function expression, got {:?}", expr)), }; assert_eq!( for_stmt.body, vec![Statement::Call(CallStatement { name: "guess".to_string(), arguments: vec![], })] ); } }
32.350365
113
0.520262
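Worth highlighting in the parser record above: errors never abort parsing. `error_and_recover` records a `ParseError` and `consume_until_end_of_statement` skips to the next line, so one bad statement yields exactly one error while later statements still parse. A hedged sketch of a test for that behaviour, in the style of the module's existing tests (the input string and counts are illustrative):

#[test]
fn recovers_after_unknown_keyword() {
    // Line 1 reports "expected keyword"; recovery consumes through the
    // newline, and the `call` on line 2 parses normally.
    let mut parser = Parser::new(Lexer::new("unknown xx()\ncall guess()"));
    let program = parser.parse();
    assert_eq!(parser.errors.len(), 1);
    assert_eq!(program.statements.len(), 1);
}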
0e31cbd2e955ce83200951192a04ef9144bfadfb
2,354
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// ignore-pretty
// ignore-test

#[feature(quote)];

extern crate extra;
extern crate syntax;

use std::io::*;

use syntax::diagnostic;
use syntax::ast;
use syntax::codemap;
use syntax::codemap::span;
use syntax::parse;
use syntax::print::*;

trait fake_ext_ctxt {
    fn cfg() -> ast::CrateConfig;
    fn parse_sess() -> parse::parse_sess;
    fn call_site() -> span;
    fn ident_of(st: &str) -> ast::ident;
}

type fake_session = parse::parse_sess;

impl fake_ext_ctxt for fake_session {
    fn cfg() -> ast::CrateConfig { ~[] }
    fn parse_sess() -> parse::parse_sess { self }
    fn call_site() -> span {
        codemap::span {
            lo: codemap::BytePos(0),
            hi: codemap::BytePos(0),
            expn_info: None
        }
    }
    fn ident_of(st: &str) -> ast::ident {
        self.interner.intern(st)
    }
}

fn mk_ctxt() -> fake_ext_ctxt {
    parse::new_parse_sess(None) as fake_ext_ctxt
}

fn main() {
    let cx = mk_ctxt();

    let abc = quote_expr!(cx, 23);
    check_pp(cx, abc, pprust::print_expr, ~"23");

    let ty = quote_ty!(cx, int);
    check_pp(cx, ty, pprust::print_type, ~"int");

    let item = quote_item!(cx, static x : int = 10;).get();
    check_pp(cx, item, pprust::print_item, ~"static x: int = 10;");

    let stmt = quote_stmt!(cx, let x = 20;);
    check_pp(cx, *stmt, pprust::print_stmt, ~"let x = 20;");

    let pat = quote_pat!(cx, Some(_));
    check_pp(cx, pat, pprust::print_pat, ~"Some(_)");
}

fn check_pp<T>(cx: fake_ext_ctxt, expr: T, f: |pprust::ps, T|, expect: ~str) {
    let s = io::with_str_writer(|wr| {
        let pp = pprust::rust_printer(wr, cx.parse_sess().interner);
        f(pp, expr);
        pp::eof(pp.s);
    });
    stdout().write_line(s);
    if expect != ~"" {
        error!("expect: '%s', got: '%s'", expect, s);
        assert_eq!(s, expect);
    }
}
25.868132
71
0.611725
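The record above is a pre-1.0 rustc test (note the `// ignore-test` marker and the `~str`/`~[]` syntax) for libsyntax quasi-quotation: build an AST fragment with `quote_*!`, pretty-print it, and compare against the expected source text. For contrast only, a hedged modern-Rust sketch of the same round-trip idea using the third-party `quote`/`proc-macro2` crates rather than compiler internals (not part of the original test):

// Assumes `quote = "1"` as a dependency; `quote!` yields a
// proc_macro2::TokenStream whose Display impl prints the tokens back out.
use quote::quote;

fn main() {
    let expr = quote! { 23 };
    assert_eq!(expr.to_string(), "23"); // single-token round-trip

    let item = quote! { static X: i32 = 10; };
    println!("{}", item); // token-spaced output, not a pretty-printer
}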
918a6713c2a753e31e0f642541b44f4051489b62
658
//! Prints mouse button events.

use bevy::prelude::*;

fn main() {
    App::new()
        .add_plugins(DefaultPlugins)
        .add_system(mouse_click_system)
        .run();
}

// This system prints messages when you press or release the left mouse button,
// and logs every frame while the button is held down:
fn mouse_click_system(mouse_button_input: Res<Input<MouseButton>>) {
    if mouse_button_input.pressed(MouseButton::Left) {
        info!("left mouse currently pressed");
    }

    if mouse_button_input.just_pressed(MouseButton::Left) {
        info!("left mouse just pressed");
    }

    if mouse_button_input.just_released(MouseButton::Left) {
        info!("left mouse just released");
    }
}
25.307692
79
0.668693
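The Bevy example above checks one button explicitly; the same `Input<MouseButton>` resource also exposes iterator-style accessors over whatever is pressed. A hedged companion sketch against the same Bevy API surface the example targets (it would be registered with another `.add_system(...)` call):

// Logs every button currently held and every button pressed this frame.
fn mouse_button_iter_system(mouse_button_input: Res<Input<MouseButton>>) {
    for button in mouse_button_input.get_just_pressed() {
        info!("{:?} was just pressed", button);
    }
    for button in mouse_button_input.get_pressed() {
        info!("{:?} is currently held", button);
    }
}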
fcf11c9de9b6f686208e537fc3728415531f6eab
8,178
//! TUI interface use super::components::*; use crate::{ actions::{dump, new_gpt, read_gpt_path, Format}, Info, }; use anyhow::{Context, Result}; use byte_unit::Byte; use cursive::{ align::HAlign, theme::{BaseColor, Color, ColorStyle, ColorType, Effect, Style}, traits::Resizable, utils::markup::StyledString, view::{Nameable, View}, views::{Button, Dialog, DummyView, EditView, LinearLayout, SelectView, TextContent, TextView}, Cursive, }; use linapi::system::devices::block::Block; use parts::{uuid::Uuid, Gpt, Partition, PartitionBuilder, PartitionType}; use std::{cell::RefCell, fs, path::Path, rc::Rc, str::FromStr}; type DiskSelectVal = Rc<RefCell<Info>>; type DiskSelect = SelectView<DiskSelectVal>; type PartSelect = SelectView<Option<Partition>>; type FormatSelect = SelectView<Format>; /// Dump the GPT Partition to a file fn dump_button(root: &mut Cursive, gpt: Gpt, info: Info) { let mut view: FormatSelect = selection(); for var in &Format::variants() { view.add_item( *var, Format::from_str(var).expect("Couldn't get variant from itself.."), ) } view.set_on_submit(move |root: &mut Cursive, format: &Format| { let text = match dump(&gpt, *format, &info) { Ok(t) => { root.pop_layer(); t } Err(e) => { root.add_layer(error(e).button("Cancel", |root| { root.pop_layer(); root.pop_layer(); })); return; } }; let view = EditView::new() .on_submit(move |root, s| { match fs::write(Path::new(s), &text) { Ok(_) => { root.pop_layer(); } Err(e) => root.add_layer(error(e)), } // }) .min_width(20); let view = Dialog::around(view) .dismiss_button("Cancel") .title("Dump File") .title_position(HAlign::Left); root.add_layer(view); }); let title = "Select format"; root.add_layer(panel(title, view).min_width(title.len() + 6)); } /// Helper to setup views due to cursive oddities fn setup_views(root: &mut Cursive) { // Make sure the selection callback is run so the info box is populated. // // If theres a current selection, like when running this for `parts`, don't // change it. // // This may be None, when the user provides a path. if let Some(cb) = root.call_on_name("disks", |v: &mut DiskSelect| { v.set_selection(v.selected_id().unwrap_or(0)) }) { cb(root) } // Make sure the parts callback is run. This won't always exist, for example // when setting up `disks`. // // `disks` will call this itself. 
if let Some(cb) = root.call_on_name("parts", |v: &mut PartSelect| v.set_selection(0)) { cb(root); } } fn parts_shared(root: &mut Cursive, info: &DiskSelectVal, quit: ErrAction) { err( root, quit, |d| { let info = info.clone(); d.button("New Gpt", move |root| { let info = info.borrow(); let gpt = new_gpt(None, &info); root.pop_layer(); root.add_fullscreen_layer(parts_impl(gpt, &info)); setup_views(root); }) }, |root| { let info = info.borrow(); let gpt = read_gpt_path(&info)?; root.add_fullscreen_layer(parts_impl(gpt, &info)); setup_views(root); // Ok(()) }, ); } fn disks_impl() -> Result<impl View> { let disks: Vec<Block> = Block::get_connected().context("Couldn't get connected devices")?; let mut disks_view: DiskSelect = selection::<DiskSelectVal>(); for disk in disks { let label = format!( "Disk {} - {} - Model: {}", disk.name(), // Byte::from_bytes(disk.size()?.into()).get_appropriate_unit(true), disk.model()?.unwrap_or_else(|| "None".into()), ); disks_view.add_item(label, Rc::new(RefCell::new(Info::new_block(&disk)?))); } disks_view.set_on_submit(|root, info: &DiskSelectVal| { parts_shared(root, info, Dismiss); }); let disks = info_box_panel( "Disks", disks_view.with_name("disks").full_screen(), vec![DummyView], ); Ok(disks) } /// Partition editing view. fn parts_impl(gpt: Gpt, info: &Info) -> impl View { let name = &info.name; let block_size = info.block_size; let new_info = info.clone(); let _remaining = gpt.remaining(); let parts = gpt.partitions(); let mut parts_view: PartSelect = selection(); for (i, part) in parts.iter().enumerate() { let label = format!("Partition {}", i + 1); parts_view.add_item(label, Some(*part)); } parts_view.add_item( StyledString::styled( "Free Space", Style { effects: Effect::Bold.into(), color: Some(ColorStyle { front: ColorType::Color(Color::Dark(BaseColor::Green)), // FIXME: https://github.com/gyscos/cursive/issues/284 ..ColorStyle::primary() }), }, ), None, ); let part_name = TextContent::new(""); let part_start = TextContent::new(""); let part_size = TextContent::new(""); let part_uuid = TextContent::new(""); let part_type = TextContent::new(""); let info = vec![ TextView::new_with_content(part_name.clone()), TextView::new_with_content(part_start.clone()), TextView::new_with_content(part_size.clone()), TextView::new_with_content(part_uuid.clone()), TextView::new_with_content(part_type.clone()), ]; parts_view.set_on_select(move |_root: &mut Cursive, part: &Option<Partition>| { // let part = part.unwrap_or( // PartitionBuilder::new(Uuid::nil(), &gpt) // .name("None") // .start(gpt.next_usable_aligned()) // .size(remaining) // .partition_type(PartitionType::Unused) // .finish(), // ); let part = part.unwrap(); // part_name.set_content(format!("Name: {}", part.name())); part_start.set_content(format!("Start: {}", part.start())); part_size.set_content(format!( "Size: {}", Byte::from_bytes((part.end().0 - part.start().0 + block_size.get()).into()) .get_appropriate_unit(true) )); part_uuid.set_content(format!("UUID: {}", part.uuid())); part_type.set_content(format!("Type: {}", part.partition_type())); // let _ = Uuid::nil(); type _A = PartitionBuilder; type _B = PartitionType; todo!("Fuck"); }); // let mut buttons = LinearLayout::horizontal() .child(DummyView.full_width()) .child(Button::new("Dump", move |root| { dump_button(root, gpt.clone(), new_info.clone()); })) .child(DummyView) .child(Button::new("Test 2", |_| ())) .child(DummyView.full_width()); buttons .set_focus_index(1) .expect("First button didn't accept focus"); let buttons = 
focused_view(buttons.with_name("buttons")); // horizontal_forward::<LinearLayout, _>( info_box_panel_footer( &format!("Partitions ({})", name), parts_view.with_name("parts").full_screen(), info, buttons, ), "buttons", ) } /// Disk Selection Display pub fn disks(root: &mut Cursive) { err( root, Quit, |d| d, |root| { root.add_fullscreen_layer(disks_impl()?); setup_views(root); Ok(()) }, ); } /// Partition Display pub fn parts(_root: &mut Cursive, _info: &Info) { todo!("Parts") // parts_shared(root, info, Quit); }
32.712
98
0.54133
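One small dependency detail in the TUI record above: disk and partition sizes are formatted through `byte_unit`, where `get_appropriate_unit(true)` auto-selects a binary unit (KiB/MiB/GiB). A hedged sketch of that behaviour in isolation (the exact Display output may vary with the byte_unit version in use):

use byte_unit::Byte;

fn main() {
    // `true` requests binary multiples (KiB, MiB, ...) rather than
    // decimal ones (KB, MB, ...).
    let pretty = Byte::from_bytes(1_048_576).get_appropriate_unit(true);
    println!("{}", pretty); // expected along the lines of "1.00 MiB"
}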
398de5163a32fe97e0072b8835bd03cbb46c5fc6
16,504
use crate::datatypes::UInt64Chunked; use crate::prelude::*; use crate::utils::arrow::array::Array; use crate::POOL; use ahash::{CallHasher, RandomState}; use arrow::bitmap::utils::get_bit_unchecked; use hashbrown::{hash_map::RawEntryMut, HashMap}; use polars_arrow::utils::CustomIterTools; use rayon::prelude::*; use std::convert::TryInto; use std::hash::{BuildHasher, BuildHasherDefault, Hash, Hasher}; // Read more: // https://www.cockroachlabs.com/blog/vectorized-hash-joiner/ // http://myeyesareblind.com/2017/02/06/Combine-hash-values/ pub trait VecHash { /// Compute the hash for all values in the array. /// /// This currently only works with the AHash RandomState hasher builder. fn vec_hash(&self, _random_state: RandomState) -> Vec<u64> { unimplemented!() } fn vec_hash_combine(&self, _random_state: RandomState, _hashes: &mut [u64]) { unimplemented!() } } pub(crate) fn get_null_hash_value(random_state: RandomState) -> u64 { // we just start with a large prime number and hash that twice // to get a constant hash value for null/None let mut hasher = random_state.build_hasher(); 3188347919usize.hash(&mut hasher); let first = hasher.finish(); let mut hasher = random_state.build_hasher(); first.hash(&mut hasher); hasher.finish() } impl<T> VecHash for ChunkedArray<T> where T: PolarsIntegerType, T::Native: Hash + CallHasher, { fn vec_hash(&self, random_state: RandomState) -> Vec<u64> { // Note that we don't use the no null branch! This can break in unexpected ways. // for instance with threading we split an array in n_threads, this may lead to // splits that have no nulls and splits that have nulls. Then one array is hashed with // Option<T> and the other array with T. // Meaning that they cannot be compared. By always hashing on Option<T> the random_state is // the only deterministic seed. 
let mut av = Vec::with_capacity(self.len()); self.downcast_iter().for_each(|arr| { av.extend( arr.values() .as_slice() .iter() .map(|v| T::Native::get_hash(v, &random_state)), ); }); let null_h = get_null_hash_value(random_state); let hashes = av.as_mut_slice(); let mut offset = 0; self.downcast_iter().for_each(|arr| { if let Some(validity) = arr.validity() { let (slice, byte_offset, _) = validity.as_slice(); (0..validity.len()) .map(|i| unsafe { get_bit_unchecked(slice, i + byte_offset) }) .zip(&mut hashes[offset..]) .for_each(|(valid, h)| { *h = [null_h, *h][valid as usize]; }) } offset += arr.len(); }); av } fn vec_hash_combine(&self, random_state: RandomState, hashes: &mut [u64]) { let null_h = get_null_hash_value(random_state.clone()); let mut offset = 0; self.downcast_iter().for_each(|arr| { match arr.null_count() { 0 => arr .values() .as_slice() .iter() .zip(&mut hashes[offset..]) .for_each(|(v, h)| { let l = T::Native::get_hash(v, &random_state); *h = boost_hash_combine(l, *h) }), _ => { let validity = arr.validity().unwrap(); let (slice, byte_offset, _) = validity.as_slice(); (0..validity.len()) .map(|i| unsafe { get_bit_unchecked(slice, i + byte_offset) }) .zip(&mut hashes[offset..]) .zip(arr.values().as_slice()) .for_each(|((valid, h), l)| { *h = boost_hash_combine( [null_h, T::Native::get_hash(l, &random_state)][valid as usize], *h, ) }); } } offset += arr.len(); }); } } impl VecHash for Utf8Chunked { fn vec_hash(&self, random_state: RandomState) -> Vec<u64> { let null_h = get_null_hash_value(random_state.clone()); let mut av = Vec::with_capacity(self.len()); self.downcast_iter().for_each(|arr| { av.extend(arr.into_iter().map(|opt_v| match opt_v { Some(v) => str::get_hash(v, &random_state), None => null_h, })) }); av } fn vec_hash_combine(&self, random_state: RandomState, hashes: &mut [u64]) { let null_h = get_null_hash_value(random_state.clone()); self.apply_to_slice( |opt_v, h| { let l = match opt_v { Some(v) => str::get_hash(v, &random_state), None => null_h, }; boost_hash_combine(l, *h) }, hashes, ) } } impl VecHash for BooleanChunked { fn vec_hash(&self, random_state: RandomState) -> Vec<u64> { let mut av = Vec::with_capacity(self.len()); self.downcast_iter().for_each(|arr| { av.extend(arr.into_iter().map(|opt_v| { let mut hasher = random_state.build_hasher(); opt_v.hash(&mut hasher); hasher.finish() })) }); av } fn vec_hash_combine(&self, random_state: RandomState, hashes: &mut [u64]) { self.apply_to_slice( |opt_v, h| { let mut hasher = random_state.build_hasher(); opt_v.hash(&mut hasher); boost_hash_combine(hasher.finish(), *h) }, hashes, ) } } impl VecHash for Float32Chunked { fn vec_hash(&self, random_state: RandomState) -> Vec<u64> { self.bit_repr_small().vec_hash(random_state) } fn vec_hash_combine(&self, random_state: RandomState, hashes: &mut [u64]) { self.bit_repr_small().vec_hash_combine(random_state, hashes) } } impl VecHash for Float64Chunked { fn vec_hash(&self, random_state: RandomState) -> Vec<u64> { self.bit_repr_large().vec_hash(random_state) } fn vec_hash_combine(&self, random_state: RandomState, hashes: &mut [u64]) { self.bit_repr_large().vec_hash_combine(random_state, hashes) } } impl VecHash for ListChunked {} #[cfg(feature = "object")] impl<T> VecHash for ObjectChunked<T> where T: PolarsObject, { fn vec_hash(&self, random_state: RandomState) -> Vec<u64> { // Note that we don't use the no null branch! This can break in unexpected ways. 
// for instance with threading we split an array in n_threads, this may lead to
        // splits that have no nulls and splits that have nulls. Then one array is hashed with
        // Option<T> and the other array with T.
        // Meaning that they cannot be compared. By always hashing on Option<T> the random_state is
        // the only deterministic seed.
        let mut av = Vec::with_capacity(self.len());
        self.downcast_iter().for_each(|arr| {
            av.extend(arr.into_iter().map(|opt_v| {
                let mut hasher = random_state.build_hasher();
                opt_v.hash(&mut hasher);
                hasher.finish()
            }))
        });
        av
    }

    fn vec_hash_combine(&self, random_state: RandomState, hashes: &mut [u64]) {
        self.apply_to_slice(
            |opt_v, h| {
                let mut hasher = random_state.build_hasher();
                opt_v.hash(&mut hasher);
                boost_hash_combine(hasher.finish(), *h)
            },
            hashes,
        )
    }
}

// Used to get a u64 from the hashing keys
// We need to modify the hashing algorithm to use the hash for this and only compute the hash once.
pub(crate) trait AsU64 {
    #[allow(clippy::wrong_self_convention)]
    fn as_u64(self) -> u64;
}

impl AsU64 for u32 {
    fn as_u64(self) -> u64 {
        self as u64
    }
}

impl AsU64 for u64 {
    fn as_u64(self) -> u64 {
        self
    }
}

impl AsU64 for Option<u32> {
    fn as_u64(self) -> u64 {
        match self {
            Some(v) => v as u64,
            // just a number safe from overflow
            None => u64::MAX >> 2,
        }
    }
}

impl AsU64 for Option<u64> {
    fn as_u64(self) -> u64 {
        self.unwrap_or(u64::MAX >> 2)
    }
}

impl AsU64 for [u8; 9] {
    fn as_u64(self) -> u64 {
        // the last byte includes the null information.
        // that one is skipped. Worst thing that could happen is an unbalanced partition.
        u64::from_ne_bytes(self[..8].try_into().unwrap())
    }
}

const BUILD_HASHER: RandomState = RandomState::with_seeds(0, 0, 0, 0);
impl AsU64 for [u8; 17] {
    fn as_u64(self) -> u64 {
        <[u8]>::get_hash(&self, &BUILD_HASHER)
    }
}

impl AsU64 for [u8; 13] {
    fn as_u64(self) -> u64 {
        <[u8]>::get_hash(&self, &BUILD_HASHER)
    }
}

#[derive(Default)]
pub struct IdHasher {
    hash: u64,
}

impl Hasher for IdHasher {
    fn finish(&self) -> u64 {
        self.hash
    }

    fn write(&mut self, _bytes: &[u8]) {
        unreachable!("IdHasher should only be used for integer keys <= 64 bit precision")
    }

    fn write_u32(&mut self, i: u32) {
        self.write_u64(i as u64)
    }

    fn write_u64(&mut self, i: u64) {
        self.hash = i;
    }

    fn write_i32(&mut self, i: i32) {
        // Safety:
        // same number of bits
        unsafe { self.write_u32(std::mem::transmute::<i32, u32>(i)) }
    }

    fn write_i64(&mut self, i: i64) {
        // Safety:
        // same number of bits
        unsafe { self.write_u64(std::mem::transmute::<i64, u64>(i)) }
    }
}

pub type IdBuildHasher = BuildHasherDefault<IdHasher>;

#[derive(Debug)]
/// Contains an idx of a row in a DataFrame and the precomputed hash of that row.
/// That hash still needs to be used to create another hash to be able to resize hashmaps without
/// accidental quadratic behavior. So do not use an Identity function!
pub(crate) struct IdxHash {
    // idx in row of Series, DataFrame
    pub(crate) idx: IdxSize,
    // precomputed hash of T
    hash: u64,
}

impl Hash for IdxHash {
    fn hash<H: Hasher>(&self, state: &mut H) {
        state.write_u64(self.hash)
    }
}

impl IdxHash {
    #[inline]
    pub(crate) fn new(idx: IdxSize, hash: u64) -> Self {
        IdxHash { idx, hash }
    }
}

/// Contains a ptr to the string slice and the precomputed hash of that string.
/// During rehashes, we will rehash the hash instead of the string, which makes rehashing
/// cheap and allows cache-coherent small hash tables.
#[derive(Eq, Copy, Clone)]
pub(crate) struct StrHash<'a> {
    str: Option<&'a str>,
    hash: u64,
}

impl<'a> Hash for StrHash<'a> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        state.write_u64(self.hash)
    }
}

impl<'a> StrHash<'a> {
    pub(crate) fn new(s: Option<&'a str>, hash: u64) -> Self {
        Self { str: s, hash }
    }
}

impl<'a> PartialEq for StrHash<'a> {
    fn eq(&self, other: &Self) -> bool {
        self.str == other.str
    }
}

impl<'a> AsU64 for StrHash<'a> {
    fn as_u64(self) -> u64 {
        self.hash
    }
}

#[inline]
/// For partition counts that are a power of 2 we can use a bitmask instead of a modulo.
pub(crate) fn this_partition(h: u64, thread_no: u64, n_partitions: u64) -> bool {
    // n % 2^i = n & (2^i - 1)
    (h.wrapping_add(thread_no)) & n_partitions.wrapping_sub(1) == 0
}

pub(crate) fn prepare_hashed_relation_threaded<T, I>(
    iters: Vec<I>,
) -> Vec<HashMap<T, (bool, Vec<IdxSize>), RandomState>>
where
    I: Iterator<Item = T> + Send + TrustedLen,
    T: Send + Hash + Eq + Sync + Copy,
{
    let n_partitions = iters.len();
    assert!(n_partitions.is_power_of_two());

    let (hashes_and_keys, build_hasher) = create_hash_and_keys_threaded_vectorized(iters, None);

    // We will create a hashtable in every thread.
    // We use the hash to partition the keys to the matching hashtable.
    // Every thread traverses all keys/hashes and ignores the ones that don't fall in that partition.
    POOL.install(|| {
        (0..n_partitions).into_par_iter().map(|partition_no| {
            let build_hasher = build_hasher.clone();
            let hashes_and_keys = &hashes_and_keys;
            let partition_no = partition_no as u64;
            let mut hash_tbl: HashMap<T, (bool, Vec<IdxSize>), RandomState> =
                HashMap::with_hasher(build_hasher);

            let n_threads = n_partitions as u64;
            let mut offset = 0;
            for hashes_and_keys in hashes_and_keys {
                let len = hashes_and_keys.len();
                hashes_and_keys
                    .iter()
                    .enumerate()
                    .for_each(|(idx, (h, k))| {
                        let idx = idx as IdxSize;
                        // partition hashes by thread no.
// So only a part of the hashes go to this hashmap
                        if this_partition(*h, partition_no, n_threads) {
                            let idx = idx + offset;
                            let entry = hash_tbl
                                .raw_entry_mut()
                                // uses the key to check equality to find an entry
                                .from_key_hashed_nocheck(*h, k);

                            match entry {
                                RawEntryMut::Vacant(entry) => {
                                    entry.insert_hashed_nocheck(*h, *k, (false, vec![idx]));
                                }
                                RawEntryMut::Occupied(mut entry) => {
                                    let (_k, v) = entry.get_key_value_mut();
                                    v.1.push(idx);
                                }
                            }
                        }
                    });

                offset += len as IdxSize;
            }
            hash_tbl
        })
    })
    .collect()
}

pub(crate) fn create_hash_and_keys_threaded_vectorized<I, T>(
    iters: Vec<I>,
    build_hasher: Option<RandomState>,
) -> (Vec<Vec<(u64, T)>>, RandomState)
where
    I: IntoIterator<Item = T> + Send,
    I::IntoIter: TrustedLen,
    T: Send + Hash + Eq,
{
    let build_hasher = build_hasher.unwrap_or_default();
    let hashes = POOL.install(|| {
        iters
            .into_par_iter()
            .map(|iter| {
                // create hashes and keys
                iter.into_iter()
                    .map(|val| {
                        let mut hasher = build_hasher.build_hasher();
                        val.hash(&mut hasher);
                        (hasher.finish(), val)
                    })
                    .collect_trusted::<Vec<_>>()
            })
            .collect()
    });
    (hashes, build_hasher)
}

// hash combine from C++'s boost lib
#[inline]
pub(crate) fn boost_hash_combine(l: u64, r: u64) -> u64 {
    l ^ r.wrapping_add(0x9e3779b9u64.wrapping_add(l << 6).wrapping_add(r >> 2))
}

pub(crate) fn df_rows_to_hashes_threaded(
    keys: &[DataFrame],
    hasher_builder: Option<RandomState>,
) -> (Vec<UInt64Chunked>, RandomState) {
    let hasher_builder = hasher_builder.unwrap_or_default();

    let hashes = POOL.install(|| {
        keys.into_par_iter()
            .map(|df| {
                let hb = hasher_builder.clone();
                let (ca, _) = df_rows_to_hashes(df, Some(hb));
                ca
            })
            .collect()
    });
    (hashes, hasher_builder)
}

pub(crate) fn df_rows_to_hashes(
    keys: &DataFrame,
    build_hasher: Option<RandomState>,
) -> (UInt64Chunked, RandomState) {
    let build_hasher = build_hasher.unwrap_or_default();

    let mut iter = keys.iter();
    let first = iter.next().expect("at least one key");
    let mut hashes = first.vec_hash(build_hasher.clone());
    let hslice = hashes.as_mut_slice();

    for keys in iter {
        keys.vec_hash_combine(build_hasher.clone(), hslice);
    }

    let chunks = vec![Arc::new(PrimitiveArray::from_data(
        ArrowDataType::UInt64,
        hashes.into(),
        None,
    )) as ArrayRef];
    (UInt64Chunked::from_chunks("", chunks), build_hasher)
}
31.799615
103
0.548715
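Two pieces of arithmetic in the hashing record above deserve a concrete check: `this_partition` leans on the identity n % 2^i == n & (2^i - 1), and `boost_hash_combine` is deliberately asymmetric so that multi-column row hashes depend on column order. A hedged sketch of tests pinning both down (illustrative, not from the original file):

#[test]
fn partition_mask_matches_modulo() {
    // Only valid because the partition count is a power of two.
    let n_partitions = 8u64;
    for h in 0..256u64 {
        for t in 0..n_partitions {
            let expected = h.wrapping_add(t) % n_partitions == 0;
            assert_eq!(this_partition(h, t, n_partitions), expected);
        }
    }
}

#[test]
fn hash_combine_is_order_sensitive() {
    // Swapping the operands changes the result, so combining column hashes
    // left-to-right distinguishes row (a, b) from row (b, a).
    assert_ne!(boost_hash_combine(1, 2), boost_hash_combine(2, 1));
}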
2f430842f9d8e958fd8ded11085579b4557b7e64
97,227
//! This module contains the "cleaned" pieces of the AST, and the functions //! that clean them. mod auto_trait; mod blanket_impl; crate mod cfg; crate mod inline; mod simplify; crate mod types; crate mod utils; use rustc_ast as ast; use rustc_attr as attr; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use rustc_hir as hir; use rustc_hir::def::{CtorKind, DefKind, Res}; use rustc_hir::def_id::{CrateNum, DefId, CRATE_DEF_INDEX, LOCAL_CRATE}; use rustc_index::vec::{Idx, IndexVec}; use rustc_infer::infer::region_constraints::{Constraint, RegionConstraintData}; use rustc_middle::bug; use rustc_middle::middle::resolve_lifetime as rl; use rustc_middle::ty::fold::TypeFolder; use rustc_middle::ty::subst::{InternalSubsts, Subst}; use rustc_middle::ty::{self, AdtKind, Lift, Ty, TyCtxt}; use rustc_mir::const_eval::{is_const_fn, is_min_const_fn, is_unstable_const_fn}; use rustc_span::hygiene::{AstPass, MacroKind}; use rustc_span::symbol::{kw, sym, Ident, Symbol}; use rustc_span::{self, ExpnKind}; use rustc_typeck::hir_ty_to_ty; use std::collections::hash_map::Entry; use std::default::Default; use std::hash::Hash; use std::rc::Rc; use std::{mem, vec}; use crate::core::{self, DocContext, ImplTraitParam}; use crate::doctree; use utils::*; crate use utils::{get_auto_trait_and_blanket_impls, krate, register_res}; crate use self::types::FnRetTy::*; crate use self::types::ItemKind::*; crate use self::types::SelfTy::*; crate use self::types::Type::*; crate use self::types::Visibility::{Inherited, Public}; crate use self::types::*; crate trait Clean<T> { fn clean(&self, cx: &DocContext<'_>) -> T; } impl<T: Clean<U>, U> Clean<Vec<U>> for [T] { fn clean(&self, cx: &DocContext<'_>) -> Vec<U> { self.iter().map(|x| x.clean(cx)).collect() } } impl<T: Clean<U>, U, V: Idx> Clean<IndexVec<V, U>> for IndexVec<V, T> { fn clean(&self, cx: &DocContext<'_>) -> IndexVec<V, U> { self.iter().map(|x| x.clean(cx)).collect() } } impl<T: Clean<U>, U> Clean<U> for &T { fn clean(&self, cx: &DocContext<'_>) -> U { (**self).clean(cx) } } impl<T: Clean<U>, U> Clean<U> for Rc<T> { fn clean(&self, cx: &DocContext<'_>) -> U { (**self).clean(cx) } } impl<T: Clean<U>, U> Clean<Option<U>> for Option<T> { fn clean(&self, cx: &DocContext<'_>) -> Option<U> { self.as_ref().map(|v| v.clean(cx)) } } impl Clean<ExternalCrate> for CrateNum { fn clean(&self, cx: &DocContext<'_>) -> ExternalCrate { let root = DefId { krate: *self, index: CRATE_DEF_INDEX }; let krate_span = cx.tcx.def_span(root); let krate_src = cx.sess().source_map().span_to_filename(krate_span); // Collect all inner modules which are tagged as implementations of // primitives. // // Note that this loop only searches the top-level items of the crate, // and this is intentional. If we were to search the entire crate for an // item tagged with `#[doc(primitive)]` then we would also have to // search the entirety of external modules for items tagged // `#[doc(primitive)]`, which is a pretty inefficient process (decoding // all that metadata unconditionally). // // In order to keep the metadata load under control, the // `#[doc(primitive)]` feature is explicitly designed to only allow the // primitive tags to show up as the top level items in a crate. // // Also note that this does not attempt to deal with modules tagged // duplicately for the same primitive. This is handled later on when // rendering by delegating everything to a hash map. 
let as_primitive = |res: Res| { if let Res::Def(DefKind::Mod, def_id) = res { let attrs = cx.tcx.get_attrs(def_id).clean(cx); let mut prim = None; for attr in attrs.lists(sym::doc) { if let Some(v) = attr.value_str() { if attr.has_name(sym::primitive) { prim = PrimitiveType::from_symbol(v); if prim.is_some() { break; } // FIXME: should warn on unknown primitives? } } } return prim.map(|p| (def_id, p)); } None }; let primitives = if root.is_local() { cx.tcx .hir() .krate() .item .module .item_ids .iter() .filter_map(|&id| { let item = cx.tcx.hir().expect_item(id.id); match item.kind { hir::ItemKind::Mod(_) => as_primitive(Res::Def( DefKind::Mod, cx.tcx.hir().local_def_id(id.id).to_def_id(), )), hir::ItemKind::Use(ref path, hir::UseKind::Single) if item.vis.node.is_pub() => { as_primitive(path.res).map(|(_, prim)| { // Pretend the primitive is local. (cx.tcx.hir().local_def_id(id.id).to_def_id(), prim) }) } _ => None, } }) .collect() } else { cx.tcx .item_children(root) .iter() .map(|item| item.res) .filter_map(as_primitive) .collect() }; let as_keyword = |res: Res| { if let Res::Def(DefKind::Mod, def_id) = res { let attrs = cx.tcx.get_attrs(def_id).clean(cx); let mut keyword = None; for attr in attrs.lists(sym::doc) { if attr.has_name(sym::keyword) { if let Some(v) = attr.value_str() { keyword = Some(v); break; } } } return keyword.map(|p| (def_id, p)); } None }; let keywords = if root.is_local() { cx.tcx .hir() .krate() .item .module .item_ids .iter() .filter_map(|&id| { let item = cx.tcx.hir().expect_item(id.id); match item.kind { hir::ItemKind::Mod(_) => as_keyword(Res::Def( DefKind::Mod, cx.tcx.hir().local_def_id(id.id).to_def_id(), )), hir::ItemKind::Use(ref path, hir::UseKind::Single) if item.vis.node.is_pub() => { as_keyword(path.res).map(|(_, prim)| { (cx.tcx.hir().local_def_id(id.id).to_def_id(), prim) }) } _ => None, } }) .collect() } else { cx.tcx.item_children(root).iter().map(|item| item.res).filter_map(as_keyword).collect() }; ExternalCrate { name: cx.tcx.crate_name(*self), src: krate_src, attrs: cx.tcx.get_attrs(root).clean(cx), primitives, keywords, } } } impl Clean<Item> for doctree::Module<'_> { fn clean(&self, cx: &DocContext<'_>) -> Item { let mut items: Vec<Item> = vec![]; items.extend(self.imports.iter().flat_map(|x| x.clean(cx))); items.extend(self.foreigns.iter().map(|x| x.clean(cx))); items.extend(self.mods.iter().map(|x| x.clean(cx))); items.extend(self.items.iter().map(|x| x.clean(cx)).flatten()); items.extend(self.macros.iter().map(|x| x.clean(cx))); // determine if we should display the inner contents or // the outer `mod` item for the source code. let span = { let sm = cx.sess().source_map(); let outer = sm.lookup_char_pos(self.where_outer.lo()); let inner = sm.lookup_char_pos(self.where_inner.lo()); if outer.file.start_pos == inner.file.start_pos { // mod foo { ... 
} self.where_outer } else { // mod foo; (and a separate SourceFile for the contents) self.where_inner } }; let what_rustc_thinks = Item::from_hir_id_and_parts( self.id, self.name, ModuleItem(Module { is_crate: self.is_crate, items }), cx, ); Item { source: span.clean(cx), ..what_rustc_thinks } } } impl Clean<Attributes> for [ast::Attribute] { fn clean(&self, cx: &DocContext<'_>) -> Attributes { Attributes::from_ast(cx.sess().diagnostic(), self, None) } } impl Clean<GenericBound> for hir::GenericBound<'_> { fn clean(&self, cx: &DocContext<'_>) -> GenericBound { match *self { hir::GenericBound::Outlives(lt) => GenericBound::Outlives(lt.clean(cx)), hir::GenericBound::LangItemTrait(lang_item, span, _, generic_args) => { let def_id = cx.tcx.require_lang_item(lang_item, Some(span)); let trait_ref = ty::TraitRef::identity(cx.tcx, def_id); let generic_args = generic_args.clean(cx); let bindings = match generic_args { GenericArgs::AngleBracketed { bindings, .. } => bindings, _ => bug!("clean: parenthesized `GenericBound::LangItemTrait`"), }; GenericBound::TraitBound( PolyTrait { trait_: (trait_ref, &*bindings).clean(cx), generic_params: vec![] }, hir::TraitBoundModifier::None, ) } hir::GenericBound::Trait(ref t, modifier) => { GenericBound::TraitBound(t.clean(cx), modifier) } } } } impl Clean<Type> for (ty::TraitRef<'_>, &[TypeBinding]) { fn clean(&self, cx: &DocContext<'_>) -> Type { let (trait_ref, bounds) = *self; inline::record_extern_fqn(cx, trait_ref.def_id, TypeKind::Trait); let path = external_path( cx, cx.tcx.item_name(trait_ref.def_id), Some(trait_ref.def_id), true, bounds.to_vec(), trait_ref.substs, ); debug!("ty::TraitRef\n subst: {:?}\n", trait_ref.substs); ResolvedPath { path, param_names: None, did: trait_ref.def_id, is_generic: false } } } impl<'tcx> Clean<GenericBound> for ty::TraitRef<'tcx> { fn clean(&self, cx: &DocContext<'_>) -> GenericBound { GenericBound::TraitBound( PolyTrait { trait_: (*self, &[][..]).clean(cx), generic_params: vec![] }, hir::TraitBoundModifier::None, ) } } impl Clean<GenericBound> for (ty::PolyTraitRef<'_>, &[TypeBinding]) { fn clean(&self, cx: &DocContext<'_>) -> GenericBound { let (poly_trait_ref, bounds) = *self; let poly_trait_ref = poly_trait_ref.lift_to_tcx(cx.tcx).unwrap(); // collect any late bound regions let late_bound_regions: Vec<_> = cx .tcx .collect_referenced_late_bound_regions(&poly_trait_ref) .into_iter() .filter_map(|br| match br { ty::BrNamed(_, name) => { Some(GenericParamDef { name, kind: GenericParamDefKind::Lifetime }) } _ => None, }) .collect(); GenericBound::TraitBound( PolyTrait { trait_: (poly_trait_ref.skip_binder(), bounds).clean(cx), generic_params: late_bound_regions, }, hir::TraitBoundModifier::None, ) } } impl<'tcx> Clean<GenericBound> for ty::PolyTraitRef<'tcx> { fn clean(&self, cx: &DocContext<'_>) -> GenericBound { (*self, &[][..]).clean(cx) } } impl<'tcx> Clean<Option<Vec<GenericBound>>> for InternalSubsts<'tcx> { fn clean(&self, cx: &DocContext<'_>) -> Option<Vec<GenericBound>> { let mut v = Vec::new(); v.extend(self.regions().filter_map(|r| r.clean(cx)).map(GenericBound::Outlives)); v.extend(self.types().map(|t| { GenericBound::TraitBound( PolyTrait { trait_: t.clean(cx), generic_params: Vec::new() }, hir::TraitBoundModifier::None, ) })); if !v.is_empty() { Some(v) } else { None } } } impl Clean<Lifetime> for hir::Lifetime { fn clean(&self, cx: &DocContext<'_>) -> Lifetime { let def = cx.tcx.named_region(self.hir_id); match def { Some( rl::Region::EarlyBound(_, node_id, _) | rl::Region::LateBound(_, node_id, _) | 
rl::Region::Free(_, node_id), ) => { if let Some(lt) = cx.lt_substs.borrow().get(&node_id).cloned() { return lt; } } _ => {} } Lifetime(self.name.ident().name) } } impl Clean<Lifetime> for hir::GenericParam<'_> { fn clean(&self, _: &DocContext<'_>) -> Lifetime { match self.kind { hir::GenericParamKind::Lifetime { .. } => { if !self.bounds.is_empty() { let mut bounds = self.bounds.iter().map(|bound| match bound { hir::GenericBound::Outlives(lt) => lt, _ => panic!(), }); let name = bounds.next().expect("no more bounds").name.ident(); let mut s = format!("{}: {}", self.name.ident(), name); for bound in bounds { s.push_str(&format!(" + {}", bound.name.ident())); } Lifetime(Symbol::intern(&s)) } else { Lifetime(self.name.ident().name) } } _ => panic!(), } } } impl Clean<Constant> for hir::ConstArg { fn clean(&self, cx: &DocContext<'_>) -> Constant { Constant { type_: cx .tcx .type_of(cx.tcx.hir().body_owner_def_id(self.value.body).to_def_id()) .clean(cx), expr: print_const_expr(cx, self.value.body), value: None, is_literal: is_literal_expr(cx, self.value.body.hir_id), } } } impl Clean<Lifetime> for ty::GenericParamDef { fn clean(&self, _cx: &DocContext<'_>) -> Lifetime { Lifetime(self.name) } } impl Clean<Option<Lifetime>> for ty::RegionKind { fn clean(&self, _cx: &DocContext<'_>) -> Option<Lifetime> { match *self { ty::ReStatic => Some(Lifetime::statik()), ty::ReLateBound(_, ty::BoundRegion { kind: ty::BrNamed(_, name) }) => { Some(Lifetime(name)) } ty::ReEarlyBound(ref data) => Some(Lifetime(data.name)), ty::ReLateBound(..) | ty::ReFree(..) | ty::ReVar(..) | ty::RePlaceholder(..) | ty::ReEmpty(_) | ty::ReErased => { debug!("cannot clean region {:?}", self); None } } } } impl Clean<WherePredicate> for hir::WherePredicate<'_> { fn clean(&self, cx: &DocContext<'_>) -> WherePredicate { match *self { hir::WherePredicate::BoundPredicate(ref wbp) => WherePredicate::BoundPredicate { ty: wbp.bounded_ty.clean(cx), bounds: wbp.bounds.clean(cx), }, hir::WherePredicate::RegionPredicate(ref wrp) => WherePredicate::RegionPredicate { lifetime: wrp.lifetime.clean(cx), bounds: wrp.bounds.clean(cx), }, hir::WherePredicate::EqPredicate(ref wrp) => { WherePredicate::EqPredicate { lhs: wrp.lhs_ty.clean(cx), rhs: wrp.rhs_ty.clean(cx) } } } } } impl<'a> Clean<Option<WherePredicate>> for ty::Predicate<'a> { fn clean(&self, cx: &DocContext<'_>) -> Option<WherePredicate> { let bound_predicate = self.bound_atom(); match bound_predicate.skip_binder() { ty::PredicateAtom::Trait(pred, _) => Some(bound_predicate.rebind(pred).clean(cx)), ty::PredicateAtom::RegionOutlives(pred) => pred.clean(cx), ty::PredicateAtom::TypeOutlives(pred) => pred.clean(cx), ty::PredicateAtom::Projection(pred) => Some(pred.clean(cx)), ty::PredicateAtom::Subtype(..) | ty::PredicateAtom::WellFormed(..) | ty::PredicateAtom::ObjectSafe(..) | ty::PredicateAtom::ClosureKind(..) | ty::PredicateAtom::ConstEvaluatable(..) | ty::PredicateAtom::ConstEquate(..) | ty::PredicateAtom::TypeWellFormedFromEnv(..) 
=> panic!("not user writable"), } } } impl<'a> Clean<WherePredicate> for ty::PolyTraitPredicate<'a> { fn clean(&self, cx: &DocContext<'_>) -> WherePredicate { let poly_trait_ref = self.map_bound(|pred| pred.trait_ref); WherePredicate::BoundPredicate { ty: poly_trait_ref.skip_binder().self_ty().clean(cx), bounds: vec![poly_trait_ref.clean(cx)], } } } impl<'tcx> Clean<Option<WherePredicate>> for ty::OutlivesPredicate<ty::Region<'tcx>, ty::Region<'tcx>> { fn clean(&self, cx: &DocContext<'_>) -> Option<WherePredicate> { let ty::OutlivesPredicate(a, b) = self; if let (ty::ReEmpty(_), ty::ReEmpty(_)) = (a, b) { return None; } Some(WherePredicate::RegionPredicate { lifetime: a.clean(cx).expect("failed to clean lifetime"), bounds: vec![GenericBound::Outlives(b.clean(cx).expect("failed to clean bounds"))], }) } } impl<'tcx> Clean<Option<WherePredicate>> for ty::OutlivesPredicate<Ty<'tcx>, ty::Region<'tcx>> { fn clean(&self, cx: &DocContext<'_>) -> Option<WherePredicate> { let ty::OutlivesPredicate(ty, lt) = self; if let ty::ReEmpty(_) = lt { return None; } Some(WherePredicate::BoundPredicate { ty: ty.clean(cx), bounds: vec![GenericBound::Outlives(lt.clean(cx).expect("failed to clean lifetimes"))], }) } } impl<'tcx> Clean<WherePredicate> for ty::ProjectionPredicate<'tcx> { fn clean(&self, cx: &DocContext<'_>) -> WherePredicate { let ty::ProjectionPredicate { projection_ty, ty } = self; WherePredicate::EqPredicate { lhs: projection_ty.clean(cx), rhs: ty.clean(cx) } } } impl<'tcx> Clean<Type> for ty::ProjectionTy<'tcx> { fn clean(&self, cx: &DocContext<'_>) -> Type { let lifted = self.lift_to_tcx(cx.tcx).unwrap(); let trait_ = match lifted.trait_ref(cx.tcx).clean(cx) { GenericBound::TraitBound(t, _) => t.trait_, GenericBound::Outlives(_) => panic!("cleaning a trait got a lifetime"), }; Type::QPath { name: cx.tcx.associated_item(self.item_def_id).ident.name, self_type: box self.self_ty().clean(cx), trait_: box trait_, } } } impl Clean<GenericParamDef> for ty::GenericParamDef { fn clean(&self, cx: &DocContext<'_>) -> GenericParamDef { let (name, kind) = match self.kind { ty::GenericParamDefKind::Lifetime => (self.name, GenericParamDefKind::Lifetime), ty::GenericParamDefKind::Type { has_default, synthetic, .. } => { let default = if has_default { Some(cx.tcx.type_of(self.def_id).clean(cx)) } else { None }; ( self.name, GenericParamDefKind::Type { did: self.def_id, bounds: vec![], // These are filled in from the where-clauses. default, synthetic, }, ) } ty::GenericParamDefKind::Const { .. } => ( self.name, GenericParamDefKind::Const { did: self.def_id, ty: cx.tcx.type_of(self.def_id).clean(cx), }, ), }; GenericParamDef { name, kind } } } impl Clean<GenericParamDef> for hir::GenericParam<'_> { fn clean(&self, cx: &DocContext<'_>) -> GenericParamDef { let (name, kind) = match self.kind { hir::GenericParamKind::Lifetime { .. 
} => { let name = if !self.bounds.is_empty() { let mut bounds = self.bounds.iter().map(|bound| match bound { hir::GenericBound::Outlives(lt) => lt, _ => panic!(), }); let name = bounds.next().expect("no more bounds").name.ident(); let mut s = format!("{}: {}", self.name.ident(), name); for bound in bounds { s.push_str(&format!(" + {}", bound.name.ident())); } Symbol::intern(&s) } else { self.name.ident().name }; (name, GenericParamDefKind::Lifetime) } hir::GenericParamKind::Type { ref default, synthetic } => ( self.name.ident().name, GenericParamDefKind::Type { did: cx.tcx.hir().local_def_id(self.hir_id).to_def_id(), bounds: self.bounds.clean(cx), default: default.clean(cx), synthetic, }, ), hir::GenericParamKind::Const { ref ty, default: _ } => ( self.name.ident().name, GenericParamDefKind::Const { did: cx.tcx.hir().local_def_id(self.hir_id).to_def_id(), ty: ty.clean(cx), // FIXME(const_generics_defaults): add `default` field here for docs }, ), }; GenericParamDef { name, kind } } } impl Clean<Generics> for hir::Generics<'_> { fn clean(&self, cx: &DocContext<'_>) -> Generics { // Synthetic type-parameters are inserted after normal ones. // In order for normal parameters to be able to refer to synthetic ones, // scans them first. fn is_impl_trait(param: &hir::GenericParam<'_>) -> bool { match param.kind { hir::GenericParamKind::Type { synthetic, .. } => { synthetic == Some(hir::SyntheticTyParamKind::ImplTrait) } _ => false, } } /// This can happen for `async fn`, e.g. `async fn f<'_>(&'_ self)`. /// /// See [`lifetime_to_generic_param`] in [`rustc_ast_lowering`] for more information. /// /// [`lifetime_to_generic_param`]: rustc_ast_lowering::LoweringContext::lifetime_to_generic_param fn is_elided_lifetime(param: &hir::GenericParam<'_>) -> bool { match param.kind { hir::GenericParamKind::Lifetime { kind: hir::LifetimeParamKind::Elided } => true, _ => false, } } let impl_trait_params = self .params .iter() .filter(|param| is_impl_trait(param)) .map(|param| { let param: GenericParamDef = param.clean(cx); match param.kind { GenericParamDefKind::Lifetime => unreachable!(), GenericParamDefKind::Type { did, ref bounds, .. } => { cx.impl_trait_bounds.borrow_mut().insert(did.into(), bounds.clone()); } GenericParamDefKind::Const { .. } => unreachable!(), } param }) .collect::<Vec<_>>(); let mut params = Vec::with_capacity(self.params.len()); for p in self.params.iter().filter(|p| !is_impl_trait(p) && !is_elided_lifetime(p)) { let p = p.clean(cx); params.push(p); } params.extend(impl_trait_params); let mut generics = Generics { params, where_predicates: self.where_clause.predicates.clean(cx) }; // Some duplicates are generated for ?Sized bounds between type params and where // predicates. The point in here is to move the bounds definitions from type params // to where predicates when such cases occur. for where_pred in &mut generics.where_predicates { match *where_pred { WherePredicate::BoundPredicate { ty: Generic(ref name), ref mut bounds } => { if bounds.is_empty() { for param in &mut generics.params { match param.kind { GenericParamDefKind::Lifetime => {} GenericParamDefKind::Type { bounds: ref mut ty_bounds, .. } => { if &param.name == name { mem::swap(bounds, ty_bounds); break; } } GenericParamDefKind::Const { .. 
} => {} } } } } _ => continue, } } generics } } impl<'a, 'tcx> Clean<Generics> for (&'a ty::Generics, ty::GenericPredicates<'tcx>) { fn clean(&self, cx: &DocContext<'_>) -> Generics { use self::WherePredicate as WP; use std::collections::BTreeMap; let (gens, preds) = *self; // Don't populate `cx.impl_trait_bounds` before `clean`ning `where` clauses, // since `Clean for ty::Predicate` would consume them. let mut impl_trait = BTreeMap::<ImplTraitParam, Vec<GenericBound>>::default(); // Bounds in the type_params and lifetimes fields are repeated in the // predicates field (see rustc_typeck::collect::ty_generics), so remove // them. let stripped_params = gens .params .iter() .filter_map(|param| match param.kind { ty::GenericParamDefKind::Lifetime => Some(param.clean(cx)), ty::GenericParamDefKind::Type { synthetic, .. } => { if param.name == kw::SelfUpper { assert_eq!(param.index, 0); return None; } if synthetic == Some(hir::SyntheticTyParamKind::ImplTrait) { impl_trait.insert(param.index.into(), vec![]); return None; } Some(param.clean(cx)) } ty::GenericParamDefKind::Const { .. } => Some(param.clean(cx)), }) .collect::<Vec<GenericParamDef>>(); // param index -> [(DefId of trait, associated type name, type)] let mut impl_trait_proj = FxHashMap::<u32, Vec<(DefId, Symbol, Ty<'tcx>)>>::default(); let where_predicates = preds .predicates .iter() .flat_map(|(p, _)| { let mut projection = None; let param_idx = (|| { let bound_p = p.bound_atom(); match bound_p.skip_binder() { ty::PredicateAtom::Trait(pred, _constness) => { if let ty::Param(param) = pred.self_ty().kind() { return Some(param.index); } } ty::PredicateAtom::TypeOutlives(ty::OutlivesPredicate(ty, _reg)) => { if let ty::Param(param) = ty.kind() { return Some(param.index); } } ty::PredicateAtom::Projection(p) => { if let ty::Param(param) = p.projection_ty.self_ty().kind() { projection = Some(bound_p.rebind(p)); return Some(param.index); } } _ => (), } None })(); if let Some(param_idx) = param_idx { if let Some(b) = impl_trait.get_mut(&param_idx.into()) { let p = p.clean(cx)?; b.extend( p.get_bounds() .into_iter() .flatten() .cloned() .filter(|b| !b.is_sized_bound(cx)), ); let proj = projection .map(|p| (p.skip_binder().projection_ty.clean(cx), p.skip_binder().ty)); if let Some(((_, trait_did, name), rhs)) = proj.as_ref().and_then(|(lhs, rhs)| Some((lhs.projection()?, rhs))) { impl_trait_proj .entry(param_idx) .or_default() .push((trait_did, name, rhs)); } return None; } } Some(p) }) .collect::<Vec<_>>(); for (param, mut bounds) in impl_trait { // Move trait bounds to the front. bounds.sort_by_key(|b| if let GenericBound::TraitBound(..) = b { false } else { true }); if let crate::core::ImplTraitParam::ParamIndex(idx) = param { if let Some(proj) = impl_trait_proj.remove(&idx) { for (trait_did, name, rhs) in proj { simplify::merge_bounds(cx, &mut bounds, trait_did, name, &rhs.clean(cx)); } } } else { unreachable!(); } cx.impl_trait_bounds.borrow_mut().insert(param, bounds); } // Now that `cx.impl_trait_bounds` is populated, we can process // remaining predicates which could contain `impl Trait`. let mut where_predicates = where_predicates.into_iter().flat_map(|p| p.clean(cx)).collect::<Vec<_>>(); // Type parameters have a Sized bound by default unless removed with // ?Sized. Scan through the predicates and mark any type parameter with // a Sized bound, removing the bounds as we find them. 
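        // Any type parameter that we do not find to be `Sized` here gets an
        // explicit `?Sized` bound re-added below.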
// // Note that associated types also have a sized bound by default, but we // don't actually know the set of associated types right here so that's // handled in cleaning associated types let mut sized_params = FxHashSet::default(); where_predicates.retain(|pred| match *pred { WP::BoundPredicate { ty: Generic(ref g), ref bounds } => { if bounds.iter().any(|b| b.is_sized_bound(cx)) { sized_params.insert(*g); false } else { true } } _ => true, }); // Run through the type parameters again and insert a ?Sized // unbound for any we didn't find to be Sized. for tp in &stripped_params { if matches!(tp.kind, types::GenericParamDefKind::Type { .. }) && !sized_params.contains(&tp.name) { where_predicates.push(WP::BoundPredicate { ty: Type::Generic(tp.name), bounds: vec![GenericBound::maybe_sized(cx)], }) } } // It would be nice to collect all of the bounds on a type and recombine // them if possible, to avoid e.g., `where T: Foo, T: Bar, T: Sized, T: 'a` // and instead see `where T: Foo + Bar + Sized + 'a` Generics { params: stripped_params, where_predicates: simplify::where_clauses(cx, where_predicates), } } } fn clean_fn_or_proc_macro( item: &hir::Item<'_>, sig: &'a hir::FnSig<'a>, generics: &'a hir::Generics<'a>, body_id: hir::BodyId, name: &mut Symbol, cx: &DocContext<'_>, ) -> ItemKind { let macro_kind = item.attrs.iter().find_map(|a| { if a.has_name(sym::proc_macro) { Some(MacroKind::Bang) } else if a.has_name(sym::proc_macro_derive) { Some(MacroKind::Derive) } else if a.has_name(sym::proc_macro_attribute) { Some(MacroKind::Attr) } else { None } }); match macro_kind { Some(kind) => { if kind == MacroKind::Derive { *name = item .attrs .lists(sym::proc_macro_derive) .find_map(|mi| mi.ident()) .expect("proc-macro derives require a name") .name; } let mut helpers = Vec::new(); for mi in item.attrs.lists(sym::proc_macro_derive) { if !mi.has_name(sym::attributes) { continue; } if let Some(list) = mi.meta_item_list() { for inner_mi in list { if let Some(ident) = inner_mi.ident() { helpers.push(ident.name); } } } } ProcMacroItem(ProcMacro { kind, helpers }) } None => { let mut func = (sig, generics, body_id).clean(cx); let def_id = cx.tcx.hir().local_def_id(item.hir_id).to_def_id(); func.header.constness = if is_const_fn(cx.tcx, def_id) && is_unstable_const_fn(cx.tcx, def_id).is_none() { hir::Constness::Const } else { hir::Constness::NotConst }; FunctionItem(func) } } } impl<'a> Clean<Function> for (&'a hir::FnSig<'a>, &'a hir::Generics<'a>, hir::BodyId) { fn clean(&self, cx: &DocContext<'_>) -> Function { let (generics, decl) = enter_impl_trait(cx, || (self.1.clean(cx), (&*self.0.decl, self.2).clean(cx))); let (all_types, ret_types) = get_all_types(&generics, &decl, cx); Function { decl, generics, header: self.0.header, all_types, ret_types } } } impl<'a> Clean<Arguments> for (&'a [hir::Ty<'a>], &'a [Ident]) { fn clean(&self, cx: &DocContext<'_>) -> Arguments { Arguments { values: self .0 .iter() .enumerate() .map(|(i, ty)| { let mut name = self.1.get(i).map(|ident| ident.name).unwrap_or(kw::Empty); if name.is_empty() { name = kw::Underscore; } Argument { name, type_: ty.clean(cx) } }) .collect(), } } } impl<'a> Clean<Arguments> for (&'a [hir::Ty<'a>], hir::BodyId) { fn clean(&self, cx: &DocContext<'_>) -> Arguments { let body = cx.tcx.hir().body(self.1); Arguments { values: self .0 .iter() .enumerate() .map(|(i, ty)| Argument { name: name_from_pat(&body.params[i].pat), type_: ty.clean(cx), }) .collect(), } } } impl<'a, A: Copy> Clean<FnDecl> for (&'a hir::FnDecl<'a>, A) where (&'a [hir::Ty<'a>], A): 
Clean<Arguments>, { fn clean(&self, cx: &DocContext<'_>) -> FnDecl { FnDecl { inputs: (&self.0.inputs[..], self.1).clean(cx), output: self.0.output.clean(cx), c_variadic: self.0.c_variadic, attrs: Attributes::default(), } } } impl<'tcx> Clean<FnDecl> for (DefId, ty::PolyFnSig<'tcx>) { fn clean(&self, cx: &DocContext<'_>) -> FnDecl { let (did, sig) = *self; let mut names = if did.is_local() { &[] } else { cx.tcx.fn_arg_names(did) }.iter(); FnDecl { output: Return(sig.skip_binder().output().clean(cx)), attrs: Attributes::default(), c_variadic: sig.skip_binder().c_variadic, inputs: Arguments { values: sig .skip_binder() .inputs() .iter() .map(|t| Argument { type_: t.clean(cx), name: names.next().map(|i| i.name).unwrap_or(kw::Empty), }) .collect(), }, } } } impl Clean<FnRetTy> for hir::FnRetTy<'_> { fn clean(&self, cx: &DocContext<'_>) -> FnRetTy { match *self { Self::Return(ref typ) => Return(typ.clean(cx)), Self::DefaultReturn(..) => DefaultReturn, } } } impl Clean<bool> for hir::IsAuto { fn clean(&self, _: &DocContext<'_>) -> bool { match *self { hir::IsAuto::Yes => true, hir::IsAuto::No => false, } } } impl Clean<Type> for hir::TraitRef<'_> { fn clean(&self, cx: &DocContext<'_>) -> Type { resolve_type(cx, self.path.clean(cx), self.hir_ref_id) } } impl Clean<PolyTrait> for hir::PolyTraitRef<'_> { fn clean(&self, cx: &DocContext<'_>) -> PolyTrait { PolyTrait { trait_: self.trait_ref.clean(cx), generic_params: self.bound_generic_params.clean(cx), } } } impl Clean<TypeKind> for hir::def::DefKind { fn clean(&self, _: &DocContext<'_>) -> TypeKind { match *self { hir::def::DefKind::Mod => TypeKind::Module, hir::def::DefKind::Struct => TypeKind::Struct, hir::def::DefKind::Union => TypeKind::Union, hir::def::DefKind::Enum => TypeKind::Enum, hir::def::DefKind::Trait => TypeKind::Trait, hir::def::DefKind::TyAlias => TypeKind::Typedef, hir::def::DefKind::ForeignTy => TypeKind::Foreign, hir::def::DefKind::TraitAlias => TypeKind::TraitAlias, hir::def::DefKind::Fn => TypeKind::Function, hir::def::DefKind::Const => TypeKind::Const, hir::def::DefKind::Static => TypeKind::Static, hir::def::DefKind::Macro(_) => TypeKind::Macro, _ => TypeKind::Foreign, } } } impl Clean<Item> for hir::TraitItem<'_> { fn clean(&self, cx: &DocContext<'_>) -> Item { let local_did = cx.tcx.hir().local_def_id(self.hir_id).to_def_id(); cx.with_param_env(local_did, || { let inner = match self.kind { hir::TraitItemKind::Const(ref ty, default) => { AssocConstItem(ty.clean(cx), default.map(|e| print_const_expr(cx, e))) } hir::TraitItemKind::Fn(ref sig, hir::TraitFn::Provided(body)) => { let mut m = (sig, &self.generics, body).clean(cx); if m.header.constness == hir::Constness::Const && is_unstable_const_fn(cx.tcx, local_did).is_some() { m.header.constness = hir::Constness::NotConst; } MethodItem(m, None) } hir::TraitItemKind::Fn(ref sig, hir::TraitFn::Required(ref names)) => { let (generics, decl) = enter_impl_trait(cx, || { (self.generics.clean(cx), (&*sig.decl, &names[..]).clean(cx)) }); let (all_types, ret_types) = get_all_types(&generics, &decl, cx); let mut t = Function { header: sig.header, decl, generics, all_types, ret_types }; if t.header.constness == hir::Constness::Const && is_unstable_const_fn(cx.tcx, local_did).is_some() { t.header.constness = hir::Constness::NotConst; } TyMethodItem(t) } hir::TraitItemKind::Type(ref bounds, ref default) => { AssocTypeItem(bounds.clean(cx), default.clean(cx)) } }; Item::from_def_id_and_parts(local_did, Some(self.ident.name), inner, cx) }) } } impl Clean<Item> for hir::ImplItem<'_> { fn 
clean(&self, cx: &DocContext<'_>) -> Item { let local_did = cx.tcx.hir().local_def_id(self.hir_id).to_def_id(); cx.with_param_env(local_did, || { let inner = match self.kind { hir::ImplItemKind::Const(ref ty, expr) => { AssocConstItem(ty.clean(cx), Some(print_const_expr(cx, expr))) } hir::ImplItemKind::Fn(ref sig, body) => { let mut m = (sig, &self.generics, body).clean(cx); if m.header.constness == hir::Constness::Const && is_unstable_const_fn(cx.tcx, local_did).is_some() { m.header.constness = hir::Constness::NotConst; } MethodItem(m, Some(self.defaultness)) } hir::ImplItemKind::TyAlias(ref ty) => { let type_ = ty.clean(cx); let item_type = type_.def_id().and_then(|did| inline::build_ty(cx, did)); TypedefItem(Typedef { type_, generics: Generics::default(), item_type }, true) } }; Item::from_def_id_and_parts(local_did, Some(self.ident.name), inner, cx) }) } } impl Clean<Item> for ty::AssocItem { fn clean(&self, cx: &DocContext<'_>) -> Item { let kind = match self.kind { ty::AssocKind::Const => { let ty = cx.tcx.type_of(self.def_id); let default = if self.defaultness.has_value() { Some(inline::print_inlined_const(cx, self.def_id)) } else { None }; AssocConstItem(ty.clean(cx), default) } ty::AssocKind::Fn => { let generics = (cx.tcx.generics_of(self.def_id), cx.tcx.explicit_predicates_of(self.def_id)) .clean(cx); let sig = cx.tcx.fn_sig(self.def_id); let mut decl = (self.def_id, sig).clean(cx); if self.fn_has_self_parameter { let self_ty = match self.container { ty::ImplContainer(def_id) => cx.tcx.type_of(def_id), ty::TraitContainer(_) => cx.tcx.types.self_param, }; let self_arg_ty = sig.input(0).skip_binder(); if self_arg_ty == self_ty { decl.inputs.values[0].type_ = Generic(kw::SelfUpper); } else if let ty::Ref(_, ty, _) = *self_arg_ty.kind() { if ty == self_ty { match decl.inputs.values[0].type_ { BorrowedRef { ref mut type_, .. } => { **type_ = Generic(kw::SelfUpper) } _ => unreachable!(), } } } } let provided = match self.container { ty::ImplContainer(_) => true, ty::TraitContainer(_) => self.defaultness.has_value(), }; let (all_types, ret_types) = get_all_types(&generics, &decl, cx); if provided { let constness = if is_min_const_fn(cx.tcx, self.def_id) { hir::Constness::Const } else { hir::Constness::NotConst }; let asyncness = cx.tcx.asyncness(self.def_id); let defaultness = match self.container { ty::ImplContainer(_) => Some(self.defaultness), ty::TraitContainer(_) => None, }; MethodItem( Function { generics, decl, header: hir::FnHeader { unsafety: sig.unsafety(), abi: sig.abi(), constness, asyncness, }, all_types, ret_types, }, defaultness, ) } else { TyMethodItem(Function { generics, decl, header: hir::FnHeader { unsafety: sig.unsafety(), abi: sig.abi(), constness: hir::Constness::NotConst, asyncness: hir::IsAsync::NotAsync, }, all_types, ret_types, }) } } ty::AssocKind::Type => { let my_name = self.ident.name; if let ty::TraitContainer(_) = self.container { let bounds = cx.tcx.explicit_item_bounds(self.def_id); let predicates = ty::GenericPredicates { parent: None, predicates: bounds }; let generics = (cx.tcx.generics_of(self.def_id), predicates).clean(cx); let mut bounds = generics .where_predicates .iter() .filter_map(|pred| { let (name, self_type, trait_, bounds) = match *pred { WherePredicate::BoundPredicate { ty: QPath { ref name, ref self_type, ref trait_ }, ref bounds, } => (name, self_type, trait_, bounds), _ => return None, }; if *name != my_name { return None; } match **trait_ { ResolvedPath { did, .. 
} if did == self.container.id() => {} _ => return None, } match **self_type { Generic(ref s) if *s == kw::SelfUpper => {} _ => return None, } Some(bounds) }) .flat_map(|i| i.iter().cloned()) .collect::<Vec<_>>(); // Our Sized/?Sized bound didn't get handled when creating the generics // because we didn't actually get our whole set of bounds until just now // (some of them may have come from the trait). If we do have a sized // bound, we remove it, and if we don't then we add the `?Sized` bound // at the end. match bounds.iter().position(|b| b.is_sized_bound(cx)) { Some(i) => { bounds.remove(i); } None => bounds.push(GenericBound::maybe_sized(cx)), } let ty = if self.defaultness.has_value() { Some(cx.tcx.type_of(self.def_id)) } else { None }; AssocTypeItem(bounds, ty.clean(cx)) } else { let type_ = cx.tcx.type_of(self.def_id).clean(cx); let item_type = type_.def_id().and_then(|did| inline::build_ty(cx, did)); TypedefItem( Typedef { type_, generics: Generics { params: Vec::new(), where_predicates: Vec::new() }, item_type, }, true, ) } } }; Item::from_def_id_and_parts(self.def_id, Some(self.ident.name), kind, cx) } } fn clean_qpath(hir_ty: &hir::Ty<'_>, cx: &DocContext<'_>) -> Type { use rustc_hir::GenericParamCount; let hir::Ty { hir_id, span, ref kind } = *hir_ty; let qpath = match kind { hir::TyKind::Path(qpath) => qpath, _ => unreachable!(), }; match qpath { hir::QPath::Resolved(None, ref path) => { if let Res::Def(DefKind::TyParam, did) = path.res { if let Some(new_ty) = cx.ty_substs.borrow().get(&did).cloned() { return new_ty; } if let Some(bounds) = cx.impl_trait_bounds.borrow_mut().remove(&did.into()) { return ImplTrait(bounds); } } let mut alias = None; if let Res::Def(DefKind::TyAlias, def_id) = path.res { // Substitute private type aliases if let Some(def_id) = def_id.as_local() { let hir_id = cx.tcx.hir().local_def_id_to_hir_id(def_id); if !cx.renderinfo.borrow().access_levels.is_exported(def_id.to_def_id()) { alias = Some(&cx.tcx.hir().expect_item(hir_id).kind); } } }; if let Some(&hir::ItemKind::TyAlias(ref ty, ref generics)) = alias { let provided_params = &path.segments.last().expect("segments were empty"); let mut ty_substs = FxHashMap::default(); let mut lt_substs = FxHashMap::default(); let mut ct_substs = FxHashMap::default(); let generic_args = provided_params.generic_args(); { let mut indices: GenericParamCount = Default::default(); for param in generics.params.iter() { match param.kind { hir::GenericParamKind::Lifetime { .. } => { let mut j = 0; let lifetime = generic_args.args.iter().find_map(|arg| match arg { hir::GenericArg::Lifetime(lt) => { if indices.lifetimes == j { return Some(lt); } j += 1; None } _ => None, }); if let Some(lt) = lifetime.cloned() { let lt_def_id = cx.tcx.hir().local_def_id(param.hir_id); let cleaned = if !lt.is_elided() { lt.clean(cx) } else { self::types::Lifetime::elided() }; lt_substs.insert(lt_def_id.to_def_id(), cleaned); } indices.lifetimes += 1; } hir::GenericParamKind::Type { ref default, .. } => { let ty_param_def_id = cx.tcx.hir().local_def_id(param.hir_id); let mut j = 0; let type_ = generic_args.args.iter().find_map(|arg| match arg { hir::GenericArg::Type(ty) => { if indices.types == j { return Some(ty); } j += 1; None } _ => None, }); if let Some(ty) = type_ { ty_substs.insert(ty_param_def_id.to_def_id(), ty.clean(cx)); } else if let Some(default) = *default { ty_substs .insert(ty_param_def_id.to_def_id(), default.clean(cx)); } indices.types += 1; } hir::GenericParamKind::Const { .. 
} => {
                                let const_param_def_id = cx.tcx.hir().local_def_id(param.hir_id);
                                let mut j = 0;
                                let const_ = generic_args.args.iter().find_map(|arg| match arg {
                                    hir::GenericArg::Const(ct) => {
                                        if indices.consts == j {
                                            return Some(ct);
                                        }
                                        j += 1;
                                        None
                                    }
                                    _ => None,
                                });
                                if let Some(ct) = const_ {
                                    ct_substs.insert(const_param_def_id.to_def_id(), ct.clean(cx));
                                }
                                // FIXME(const_generics_defaults)
                                indices.consts += 1;
                            }
                        }
                    }
                }
                return cx.enter_alias(ty_substs, lt_substs, ct_substs, || ty.clean(cx));
            }
            resolve_type(cx, path.clean(cx), hir_id)
        }
        hir::QPath::Resolved(Some(ref qself), ref p) => {
            // Try to normalize `<X as Y>::T` to a type
            let ty = hir_ty_to_ty(cx.tcx, hir_ty);
            if let Some(normalized_value) = normalize(cx, ty) {
                return normalized_value.clean(cx);
            }

            let segments = if p.is_global() { &p.segments[1..] } else { &p.segments };
            let trait_segments = &segments[..segments.len() - 1];
            let trait_path = self::Path {
                global: p.is_global(),
                res: Res::Def(
                    DefKind::Trait,
                    cx.tcx.associated_item(p.res.def_id()).container.id(),
                ),
                segments: trait_segments.clean(cx),
            };
            Type::QPath {
                name: p.segments.last().expect("segments were empty").ident.name,
                self_type: box qself.clean(cx),
                trait_: box resolve_type(cx, trait_path, hir_id),
            }
        }
        hir::QPath::TypeRelative(ref qself, ref segment) => {
            let ty = hir_ty_to_ty(cx.tcx, hir_ty);
            let res = if let ty::Projection(proj) = ty.kind() {
                Res::Def(DefKind::Trait, proj.trait_ref(cx.tcx).def_id)
            } else {
                Res::Err
            };
            let trait_path = hir::Path { span, res, segments: &[] };
            Type::QPath {
                name: segment.ident.name,
                self_type: box qself.clean(cx),
                trait_: box resolve_type(cx, trait_path.clean(cx), hir_id),
            }
        }
        hir::QPath::LangItem(..) => bug!("clean: requiring documentation of lang item"),
    }
}

impl Clean<Type> for hir::Ty<'_> {
    fn clean(&self, cx: &DocContext<'_>) -> Type {
        use rustc_hir::*;

        match self.kind {
            TyKind::Never => Never,
            TyKind::Ptr(ref m) => RawPointer(m.mutbl, box m.ty.clean(cx)),
            TyKind::Rptr(ref l, ref m) => {
                // There are two times a `Fresh` lifetime can be created:
                // 1. For `&'_ x`, written by the user. This corresponds to `lower_lifetime` in `rustc_ast_lowering`.
                // 2. For `&x` as a parameter to an `async fn`. This corresponds to `elided_ref_lifetime` in `rustc_ast_lowering`.
                // See #59286 for more information.
                // Ideally we would only hide the `'_` for case 2., but I don't know a way to distinguish it.
                // Turning `fn f(&'_ self)` into `fn f(&self)` isn't the worst thing in the world, though;
                // there's no case where it could cause the function to fail to compile.
                let elided =
                    l.is_elided() || matches!(l.name, LifetimeName::Param(ParamName::Fresh(_)));
                let lifetime = if elided { None } else { Some(l.clean(cx)) };
                BorrowedRef { lifetime, mutability: m.mutbl, type_: box m.ty.clean(cx) }
            }
            TyKind::Slice(ref ty) => Slice(box ty.clean(cx)),
            TyKind::Array(ref ty, ref length) => {
                let def_id = cx.tcx.hir().local_def_id(length.hir_id);
                // NOTE(min_const_generics): We can't use `const_eval_poly` for constants
                // as we currently do not supply the parent generics to anonymous constants
                // but do allow `ConstKind::Param`.
                //
                // `const_eval_poly` tries to first substitute generic parameters, which
                // results in an ICE while manually constructing the constant and using `eval`
                // does nothing for `ConstKind::Param`.
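                // So instead, the constant is built by hand here and evaluated in its
                // own `ParamEnv`; the pretty-printed result is used as the array length.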
let ct = ty::Const::from_anon_const(cx.tcx, def_id); let param_env = cx.tcx.param_env(def_id); let length = print_const(cx, ct.eval(cx.tcx, param_env)); Array(box ty.clean(cx), length) } TyKind::Tup(ref tys) => Tuple(tys.clean(cx)), TyKind::OpaqueDef(item_id, _) => { let item = cx.tcx.hir().expect_item(item_id.id); if let hir::ItemKind::OpaqueTy(ref ty) = item.kind { ImplTrait(ty.bounds.clean(cx)) } else { unreachable!() } } TyKind::Path(_) => clean_qpath(&self, cx), TyKind::TraitObject(ref bounds, ref lifetime) => { match bounds[0].clean(cx).trait_ { ResolvedPath { path, param_names: None, did, is_generic } => { let mut bounds: Vec<self::GenericBound> = bounds[1..] .iter() .map(|bound| { self::GenericBound::TraitBound( bound.clean(cx), hir::TraitBoundModifier::None, ) }) .collect(); if !lifetime.is_elided() { bounds.push(self::GenericBound::Outlives(lifetime.clean(cx))); } ResolvedPath { path, param_names: Some(bounds), did, is_generic } } _ => Infer, // shouldn't happen } } TyKind::BareFn(ref barefn) => BareFunction(box barefn.clean(cx)), TyKind::Infer | TyKind::Err => Infer, TyKind::Typeof(..) => panic!("unimplemented type {:?}", self.kind), } } } /// Returns `None` if the type could not be normalized fn normalize(cx: &DocContext<'tcx>, ty: Ty<'_>) -> Option<Ty<'tcx>> { // HACK: low-churn fix for #79459 while we wait for a trait normalization fix if !cx.tcx.sess.opts.debugging_opts.normalize_docs { return None; } use crate::rustc_trait_selection::infer::TyCtxtInferExt; use crate::rustc_trait_selection::traits::query::normalize::AtExt; use rustc_middle::traits::ObligationCause; // Try to normalize `<X as Y>::T` to a type let lifted = ty.lift_to_tcx(cx.tcx).unwrap(); let normalized = cx.tcx.infer_ctxt().enter(|infcx| { infcx .at(&ObligationCause::dummy(), cx.param_env.get()) .normalize(lifted) .map(|resolved| infcx.resolve_vars_if_possible(resolved.value)) }); match normalized { Ok(normalized_value) => { debug!("normalized {:?} to {:?}", ty, normalized_value); Some(normalized_value) } Err(err) => { debug!("failed to normalize {:?}: {:?}", ty, err); None } } } impl<'tcx> Clean<Type> for Ty<'tcx> { fn clean(&self, cx: &DocContext<'_>) -> Type { debug!("cleaning type: {:?}", self); let ty = normalize(cx, self).unwrap_or(self); match *ty.kind() { ty::Never => Never, ty::Bool => Primitive(PrimitiveType::Bool), ty::Char => Primitive(PrimitiveType::Char), ty::Int(int_ty) => Primitive(int_ty.into()), ty::Uint(uint_ty) => Primitive(uint_ty.into()), ty::Float(float_ty) => Primitive(float_ty.into()), ty::Str => Primitive(PrimitiveType::Str), ty::Slice(ty) => Slice(box ty.clean(cx)), ty::Array(ty, n) => { let mut n = cx.tcx.lift(n).expect("array lift failed"); n = n.eval(cx.tcx, ty::ParamEnv::reveal_all()); let n = print_const(cx, n); Array(box ty.clean(cx), n) } ty::RawPtr(mt) => RawPointer(mt.mutbl, box mt.ty.clean(cx)), ty::Ref(r, ty, mutbl) => { BorrowedRef { lifetime: r.clean(cx), mutability: mutbl, type_: box ty.clean(cx) } } ty::FnDef(..) 
| ty::FnPtr(_) => { let ty = cx.tcx.lift(*self).expect("FnPtr lift failed"); let sig = ty.fn_sig(cx.tcx); let def_id = DefId::local(CRATE_DEF_INDEX); BareFunction(box BareFunctionDecl { unsafety: sig.unsafety(), generic_params: Vec::new(), decl: (def_id, sig).clean(cx), abi: sig.abi(), }) } ty::Adt(def, substs) => { let did = def.did; let kind = match def.adt_kind() { AdtKind::Struct => TypeKind::Struct, AdtKind::Union => TypeKind::Union, AdtKind::Enum => TypeKind::Enum, }; inline::record_extern_fqn(cx, did, kind); let path = external_path(cx, cx.tcx.item_name(did), None, false, vec![], substs); ResolvedPath { path, param_names: None, did, is_generic: false } } ty::Foreign(did) => { inline::record_extern_fqn(cx, did, TypeKind::Foreign); let path = external_path( cx, cx.tcx.item_name(did), None, false, vec![], InternalSubsts::empty(), ); ResolvedPath { path, param_names: None, did, is_generic: false } } ty::Dynamic(ref obj, ref reg) => { // HACK: pick the first `did` as the `did` of the trait object. Someone // might want to implement "native" support for marker-trait-only // trait objects. let mut dids = obj.principal_def_id().into_iter().chain(obj.auto_traits()); let did = dids .next() .unwrap_or_else(|| panic!("found trait object `{:?}` with no traits?", self)); let substs = match obj.principal() { Some(principal) => principal.skip_binder().substs, // marker traits have no substs. _ => cx.tcx.intern_substs(&[]), }; inline::record_extern_fqn(cx, did, TypeKind::Trait); let mut param_names = vec![]; if let Some(b) = reg.clean(cx) { param_names.push(GenericBound::Outlives(b)); } for did in dids { let empty = cx.tcx.intern_substs(&[]); let path = external_path(cx, cx.tcx.item_name(did), Some(did), false, vec![], empty); inline::record_extern_fqn(cx, did, TypeKind::Trait); let bound = GenericBound::TraitBound( PolyTrait { trait_: ResolvedPath { path, param_names: None, did, is_generic: false, }, generic_params: Vec::new(), }, hir::TraitBoundModifier::None, ); param_names.push(bound); } let mut bindings = vec![]; for pb in obj.projection_bounds() { bindings.push(TypeBinding { name: cx.tcx.associated_item(pb.item_def_id()).ident.name, kind: TypeBindingKind::Equality { ty: pb.skip_binder().ty.clean(cx) }, }); } let path = external_path(cx, cx.tcx.item_name(did), Some(did), false, bindings, substs); ResolvedPath { path, param_names: Some(param_names), did, is_generic: false } } ty::Tuple(ref t) => { Tuple(t.iter().map(|t| t.expect_ty()).collect::<Vec<_>>().clean(cx)) } ty::Projection(ref data) => data.clean(cx), ty::Param(ref p) => { if let Some(bounds) = cx.impl_trait_bounds.borrow_mut().remove(&p.index.into()) { ImplTrait(bounds) } else { Generic(p.name) } } ty::Opaque(def_id, substs) => { // Grab the "TraitA + TraitB" from `impl TraitA + TraitB`, // by looking up the bounds associated with the def_id. let substs = cx.tcx.lift(substs).expect("Opaque lift failed"); let bounds = cx .tcx .explicit_item_bounds(def_id) .iter() .map(|(bound, _)| bound.subst(cx.tcx, substs)) .collect::<Vec<_>>(); let mut regions = vec![]; let mut has_sized = false; let mut bounds = bounds .iter() .filter_map(|bound| { // Note: The substs of opaque types can contain unbound variables, // meaning that we have to use `ignore_quantifiers_with_unbound_vars` here. 
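                        // Each bound either contributes a trait ref (kept, with its
                        // projection bindings gathered below), an outlives region
                        // collected into `regions`, or is dropped.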
let bound_predicate = bound.bound_atom_with_opt_escaping(cx.tcx); let trait_ref = match bound_predicate.skip_binder() { ty::PredicateAtom::Trait(tr, _constness) => { bound_predicate.rebind(tr.trait_ref) } ty::PredicateAtom::TypeOutlives(ty::OutlivesPredicate(_ty, reg)) => { if let Some(r) = reg.clean(cx) { regions.push(GenericBound::Outlives(r)); } return None; } _ => return None, }; if let Some(sized) = cx.tcx.lang_items().sized_trait() { if trait_ref.def_id() == sized { has_sized = true; return None; } } let bounds: Vec<_> = bounds .iter() .filter_map(|bound| { if let ty::PredicateAtom::Projection(proj) = bound.bound_atom_with_opt_escaping(cx.tcx).skip_binder() { if proj.projection_ty.trait_ref(cx.tcx) == trait_ref.skip_binder() { Some(TypeBinding { name: cx .tcx .associated_item(proj.projection_ty.item_def_id) .ident .name, kind: TypeBindingKind::Equality { ty: proj.ty.clean(cx), }, }) } else { None } } else { None } }) .collect(); Some((trait_ref, &bounds[..]).clean(cx)) }) .collect::<Vec<_>>(); bounds.extend(regions); if !has_sized && !bounds.is_empty() { bounds.insert(0, GenericBound::maybe_sized(cx)); } ImplTrait(bounds) } ty::Closure(..) | ty::Generator(..) => Tuple(vec![]), // FIXME(pcwalton) ty::Bound(..) => panic!("Bound"), ty::Placeholder(..) => panic!("Placeholder"), ty::GeneratorWitness(..) => panic!("GeneratorWitness"), ty::Infer(..) => panic!("Infer"), ty::Error(_) => panic!("Error"), } } } impl<'tcx> Clean<Constant> for ty::Const<'tcx> { fn clean(&self, cx: &DocContext<'_>) -> Constant { Constant { type_: self.ty.clean(cx), expr: format!("{}", self), value: None, is_literal: false, } } } impl Clean<Item> for hir::StructField<'_> { fn clean(&self, cx: &DocContext<'_>) -> Item { let what_rustc_thinks = Item::from_hir_id_and_parts( self.hir_id, Some(self.ident.name), StructFieldItem(self.ty.clean(cx)), cx, ); // Don't show `pub` for fields on enum variants; they are always public Item { visibility: self.vis.clean(cx), ..what_rustc_thinks } } } impl Clean<Item> for ty::FieldDef { fn clean(&self, cx: &DocContext<'_>) -> Item { let what_rustc_thinks = Item::from_def_id_and_parts( self.did, Some(self.ident.name), StructFieldItem(cx.tcx.type_of(self.did).clean(cx)), cx, ); // Don't show `pub` for fields on enum variants; they are always public Item { visibility: self.vis.clean(cx), ..what_rustc_thinks } } } impl Clean<Visibility> for hir::Visibility<'_> { fn clean(&self, cx: &DocContext<'_>) -> Visibility { match self.node { hir::VisibilityKind::Public => Visibility::Public, hir::VisibilityKind::Inherited => Visibility::Inherited, hir::VisibilityKind::Crate(_) => { let krate = DefId::local(CRATE_DEF_INDEX); Visibility::Restricted(krate) } hir::VisibilityKind::Restricted { ref path, .. } => { let path = path.clean(cx); let did = register_res(cx, path.res); Visibility::Restricted(did) } } } } impl Clean<Visibility> for ty::Visibility { fn clean(&self, _cx: &DocContext<'_>) -> Visibility { match *self { ty::Visibility::Public => Visibility::Public, // NOTE: this is not quite right: `ty` uses `Invisible` to mean 'private', // while rustdoc really does mean inherited. That means that for enum variants, such as // `pub enum E { V }`, `V` will be marked as `Public` by `ty`, but as `Inherited` by rustdoc. // This is the main reason `impl Clean for hir::Visibility` still exists; various parts of clean // override `tcx.visibility` explicitly to make sure this distinction is captured. 
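            // `Restricted` keeps the `DefId` of the module the visibility is
            // restricted to, so the `pub(in ...)` path can be reconstructed later.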
ty::Visibility::Invisible => Visibility::Inherited, ty::Visibility::Restricted(module) => Visibility::Restricted(module), } } } impl Clean<VariantStruct> for rustc_hir::VariantData<'_> { fn clean(&self, cx: &DocContext<'_>) -> VariantStruct { VariantStruct { struct_type: doctree::struct_type_from_def(self), fields: self.fields().iter().map(|x| x.clean(cx)).collect(), fields_stripped: false, } } } impl Clean<Item> for doctree::Variant<'_> { fn clean(&self, cx: &DocContext<'_>) -> Item { let what_rustc_thinks = Item::from_hir_id_and_parts( self.id, Some(self.name), VariantItem(Variant { kind: self.def.clean(cx) }), cx, ); // don't show `pub` for variants, which are always public Item { visibility: Inherited, ..what_rustc_thinks } } } impl Clean<Item> for ty::VariantDef { fn clean(&self, cx: &DocContext<'_>) -> Item { let kind = match self.ctor_kind { CtorKind::Const => VariantKind::CLike, CtorKind::Fn => VariantKind::Tuple( self.fields.iter().map(|f| cx.tcx.type_of(f.did).clean(cx)).collect(), ), CtorKind::Fictive => VariantKind::Struct(VariantStruct { struct_type: doctree::Plain, fields_stripped: false, fields: self .fields .iter() .map(|field| { let name = Some(field.ident.name); let kind = StructFieldItem(cx.tcx.type_of(field.did).clean(cx)); let what_rustc_thinks = Item::from_def_id_and_parts(field.did, name, kind, cx); // don't show `pub` for fields, which are always public Item { visibility: Visibility::Inherited, ..what_rustc_thinks } }) .collect(), }), }; let what_rustc_thinks = Item::from_def_id_and_parts( self.def_id, Some(self.ident.name), VariantItem(Variant { kind }), cx, ); // don't show `pub` for fields, which are always public Item { visibility: Inherited, ..what_rustc_thinks } } } impl Clean<VariantKind> for hir::VariantData<'_> { fn clean(&self, cx: &DocContext<'_>) -> VariantKind { match self { hir::VariantData::Struct(..) => VariantKind::Struct(self.clean(cx)), hir::VariantData::Tuple(..) => { VariantKind::Tuple(self.fields().iter().map(|x| x.ty.clean(cx)).collect()) } hir::VariantData::Unit(..) => VariantKind::CLike, } } } impl Clean<Span> for rustc_span::Span { fn clean(&self, _cx: &DocContext<'_>) -> Span { Span::from_rustc_span(*self) } } impl Clean<Path> for hir::Path<'_> { fn clean(&self, cx: &DocContext<'_>) -> Path { Path { global: self.is_global(), res: self.res, segments: if self.is_global() { &self.segments[1..] 
} else { &self.segments }.clean(cx), } } } impl Clean<GenericArgs> for hir::GenericArgs<'_> { fn clean(&self, cx: &DocContext<'_>) -> GenericArgs { if self.parenthesized { let output = self.bindings[0].ty().clean(cx); GenericArgs::Parenthesized { inputs: self.inputs().clean(cx), output: if output != Type::Tuple(Vec::new()) { Some(output) } else { None }, } } else { GenericArgs::AngleBracketed { args: self .args .iter() .map(|arg| match arg { hir::GenericArg::Lifetime(lt) if !lt.is_elided() => { GenericArg::Lifetime(lt.clean(cx)) } hir::GenericArg::Lifetime(_) => GenericArg::Lifetime(Lifetime::elided()), hir::GenericArg::Type(ty) => GenericArg::Type(ty.clean(cx)), hir::GenericArg::Const(ct) => GenericArg::Const(ct.clean(cx)), }) .collect(), bindings: self.bindings.clean(cx), } } } } impl Clean<PathSegment> for hir::PathSegment<'_> { fn clean(&self, cx: &DocContext<'_>) -> PathSegment { PathSegment { name: self.ident.name, args: self.generic_args().clean(cx) } } } impl Clean<String> for Ident { #[inline] fn clean(&self, cx: &DocContext<'_>) -> String { self.name.clean(cx) } } impl Clean<String> for Symbol { #[inline] fn clean(&self, _: &DocContext<'_>) -> String { self.to_string() } } impl Clean<BareFunctionDecl> for hir::BareFnTy<'_> { fn clean(&self, cx: &DocContext<'_>) -> BareFunctionDecl { let (generic_params, decl) = enter_impl_trait(cx, || { (self.generic_params.clean(cx), (&*self.decl, &self.param_names[..]).clean(cx)) }); BareFunctionDecl { unsafety: self.unsafety, abi: self.abi, decl, generic_params } } } impl Clean<Vec<Item>> for (&hir::Item<'_>, Option<Symbol>) { fn clean(&self, cx: &DocContext<'_>) -> Vec<Item> { use hir::ItemKind; let (item, renamed) = self; let def_id = cx.tcx.hir().local_def_id(item.hir_id).to_def_id(); let mut name = renamed.unwrap_or_else(|| cx.tcx.hir().name(item.hir_id)); cx.with_param_env(def_id, || { let kind = match item.kind { ItemKind::Static(ty, mutability, body_id) => StaticItem(Static { type_: ty.clean(cx), mutability, expr: print_const_expr(cx, body_id), }), ItemKind::Const(ty, body_id) => ConstantItem(Constant { type_: ty.clean(cx), expr: print_const_expr(cx, body_id), value: print_evaluated_const(cx, def_id), is_literal: is_literal_expr(cx, body_id.hir_id), }), ItemKind::OpaqueTy(ref ty) => OpaqueTyItem(OpaqueTy { bounds: ty.bounds.clean(cx), generics: ty.generics.clean(cx), }), ItemKind::TyAlias(ty, ref generics) => { let rustdoc_ty = ty.clean(cx); let item_type = rustdoc_ty.def_id().and_then(|did| inline::build_ty(cx, did)); TypedefItem( Typedef { type_: rustdoc_ty, generics: generics.clean(cx), item_type }, false, ) } ItemKind::Enum(ref def, ref generics) => EnumItem(Enum { variants: def.variants.iter().map(|v| v.clean(cx)).collect(), generics: generics.clean(cx), variants_stripped: false, }), ItemKind::TraitAlias(ref generics, bounds) => TraitAliasItem(TraitAlias { generics: generics.clean(cx), bounds: bounds.clean(cx), }), ItemKind::Union(ref variant_data, ref generics) => UnionItem(Union { struct_type: doctree::struct_type_from_def(&variant_data), generics: generics.clean(cx), fields: variant_data.fields().clean(cx), fields_stripped: false, }), ItemKind::Struct(ref variant_data, ref generics) => StructItem(Struct { struct_type: doctree::struct_type_from_def(&variant_data), generics: generics.clean(cx), fields: variant_data.fields().clean(cx), fields_stripped: false, }), ItemKind::Impl { .. 
} => return clean_impl(item, cx), // proc macros can have a name set by attributes ItemKind::Fn(ref sig, ref generics, body_id) => { clean_fn_or_proc_macro(item, sig, generics, body_id, &mut name, cx) } hir::ItemKind::Trait(is_auto, unsafety, ref generics, ref bounds, ref item_ids) => { let items = item_ids .iter() .map(|ti| cx.tcx.hir().trait_item(ti.id).clean(cx)) .collect(); let attrs = item.attrs.clean(cx); let is_spotlight = attrs.has_doc_flag(sym::spotlight); TraitItem(Trait { unsafety, items, generics: generics.clean(cx), bounds: bounds.clean(cx), is_spotlight, is_auto: is_auto.clean(cx), }) } ItemKind::ExternCrate(orig_name) => { return clean_extern_crate(item, name, orig_name, cx); } _ => unreachable!("not yet converted"), }; vec![Item::from_def_id_and_parts(def_id, Some(name), kind, cx)] }) } } impl Clean<Item> for hir::Variant<'_> { fn clean(&self, cx: &DocContext<'_>) -> Item { let kind = VariantItem(Variant { kind: self.data.clean(cx) }); let what_rustc_thinks = Item::from_hir_id_and_parts(self.id, Some(self.ident.name), kind, cx); // don't show `pub` for variants, which are always public Item { visibility: Inherited, ..what_rustc_thinks } } } impl Clean<ImplPolarity> for ty::ImplPolarity { fn clean(&self, _: &DocContext<'_>) -> ImplPolarity { match self { &ty::ImplPolarity::Positive | // FIXME: do we want to do something else here? &ty::ImplPolarity::Reservation => ImplPolarity::Positive, &ty::ImplPolarity::Negative => ImplPolarity::Negative, } } } fn clean_impl(impl_: &hir::Item<'_>, cx: &DocContext<'_>) -> Vec<Item> { let mut ret = Vec::new(); let (trait_, items, for_, unsafety, generics) = match &impl_.kind { hir::ItemKind::Impl { of_trait, items, self_ty, unsafety, generics, .. } => { (of_trait, items, self_ty, *unsafety, generics) } _ => unreachable!(), }; let trait_ = trait_.clean(cx); let items = items.iter().map(|ii| cx.tcx.hir().impl_item(ii.id).clean(cx)).collect::<Vec<_>>(); let def_id = cx.tcx.hir().local_def_id(impl_.hir_id); // If this impl block is an implementation of the Deref trait, then we // need to try inlining the target's inherent impl blocks as well. 
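    // (This is what makes the target type's methods appear on the page of a
    // type that merely derefs to it.)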
if trait_.def_id() == cx.tcx.lang_items().deref_trait() { build_deref_target_impls(cx, &items, &mut ret); } let provided: FxHashSet<Symbol> = trait_ .def_id() .map(|did| cx.tcx.provided_trait_methods(did).map(|meth| meth.ident.name).collect()) .unwrap_or_default(); let for_ = for_.clean(cx); let type_alias = for_.def_id().and_then(|did| match cx.tcx.def_kind(did) { DefKind::TyAlias => Some(cx.tcx.type_of(did).clean(cx)), _ => None, }); let make_item = |trait_: Option<Type>, for_: Type, items: Vec<Item>| { let kind = ImplItem(Impl { unsafety, generics: generics.clean(cx), provided_trait_methods: provided.clone(), trait_, for_, items, polarity: Some(cx.tcx.impl_polarity(def_id).clean(cx)), synthetic: false, blanket_impl: None, }); Item::from_hir_id_and_parts(impl_.hir_id, None, kind, cx) }; if let Some(type_alias) = type_alias { ret.push(make_item(trait_.clone(), type_alias, items.clone())); } ret.push(make_item(trait_, for_, items)); ret } fn clean_extern_crate( krate: &hir::Item<'_>, name: Symbol, orig_name: Option<Symbol>, cx: &DocContext<'_>, ) -> Vec<Item> { // this is the ID of the `extern crate` statement let def_id = cx.tcx.hir().local_def_id(krate.hir_id); let cnum = cx.tcx.extern_mod_stmt_cnum(def_id).unwrap_or(LOCAL_CRATE); // this is the ID of the crate itself let crate_def_id = DefId { krate: cnum, index: CRATE_DEF_INDEX }; let please_inline = krate.vis.node.is_pub() && krate.attrs.iter().any(|a| { a.has_name(sym::doc) && match a.meta_item_list() { Some(l) => attr::list_contains_name(&l, sym::inline), None => false, } }); if please_inline { let mut visited = FxHashSet::default(); let res = Res::Def(DefKind::Mod, crate_def_id); if let Some(items) = inline::try_inline( cx, cx.tcx.parent_module(krate.hir_id).to_def_id(), res, name, Some(krate.attrs), &mut visited, ) { return items; } } // FIXME: using `from_def_id_and_kind` breaks `rustdoc/masked` for some reason vec![Item { name: None, attrs: krate.attrs.clean(cx), source: krate.span.clean(cx), def_id: crate_def_id, visibility: krate.vis.clean(cx), kind: box ExternCrateItem(name, orig_name), }] } impl Clean<Vec<Item>> for doctree::Import<'_> { fn clean(&self, cx: &DocContext<'_>) -> Vec<Item> { // We need this comparison because some imports (for std types for example) // are "inserted" as well but directly by the compiler and they should not be // taken into account. if self.span.ctxt().outer_expn_data().kind == ExpnKind::AstPass(AstPass::StdImports) { return Vec::new(); } let (doc_meta_item, please_inline) = self.attrs.lists(sym::doc).get_word_attr(sym::inline); let pub_underscore = self.vis.node.is_pub() && self.name == kw::Underscore; if pub_underscore && please_inline { rustc_errors::struct_span_err!( cx.tcx.sess, doc_meta_item.unwrap().span(), E0780, "anonymous imports cannot be inlined" ) .span_label(self.span, "anonymous import") .emit(); } // We consider inlining the documentation of `pub use` statements, but we // forcefully don't inline if this is not public or if the // #[doc(no_inline)] attribute is present. // Don't inline doc(hidden) imports so they can be stripped at a later stage. 
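        // For example, `pub use foo::Bar;` is a candidate for inlining, while
        // `use foo::Bar;` or `#[doc(no_inline)] pub use foo::Bar;` are kept as
        // plain `use` items.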
let mut denied = !self.vis.node.is_pub() || pub_underscore || self.attrs.iter().any(|a| { a.has_name(sym::doc) && match a.meta_item_list() { Some(l) => { attr::list_contains_name(&l, sym::no_inline) || attr::list_contains_name(&l, sym::hidden) } None => false, } }); // Also check whether imports were asked to be inlined, in case we're trying to re-export a // crate in Rust 2018+ let path = self.path.clean(cx); let inner = if self.glob { if !denied { let mut visited = FxHashSet::default(); if let Some(items) = inline::try_inline_glob(cx, path.res, &mut visited) { return items; } } Import::new_glob(resolve_use_source(cx, path), true) } else { let name = self.name; if !please_inline { if let Res::Def(DefKind::Mod, did) = path.res { if !did.is_local() && did.index == CRATE_DEF_INDEX { // if we're `pub use`ing an extern crate root, don't inline it unless we // were specifically asked for it denied = true; } } } if !denied { let mut visited = FxHashSet::default(); if let Some(mut items) = inline::try_inline( cx, cx.tcx.parent_module(self.id).to_def_id(), path.res, name, Some(self.attrs), &mut visited, ) { items.push(Item { name: None, attrs: self.attrs.clean(cx), source: self.span.clean(cx), def_id: cx.tcx.hir().local_def_id(self.id).to_def_id(), visibility: self.vis.clean(cx), kind: box ImportItem(Import::new_simple( self.name, resolve_use_source(cx, path), false, )), }); return items; } } Import::new_simple(name, resolve_use_source(cx, path), true) }; vec![Item { name: None, attrs: self.attrs.clean(cx), source: self.span.clean(cx), def_id: cx.tcx.hir().local_def_id(self.id).to_def_id(), visibility: self.vis.clean(cx), kind: box ImportItem(inner), }] } } impl Clean<Item> for (&hir::ForeignItem<'_>, Option<Symbol>) { fn clean(&self, cx: &DocContext<'_>) -> Item { let (item, renamed) = self; cx.with_param_env(cx.tcx.hir().local_def_id(item.hir_id).to_def_id(), || { let kind = match item.kind { hir::ForeignItemKind::Fn(ref decl, ref names, ref generics) => { let abi = cx.tcx.hir().get_foreign_abi(item.hir_id); let (generics, decl) = enter_impl_trait(cx, || { (generics.clean(cx), (&**decl, &names[..]).clean(cx)) }); let (all_types, ret_types) = get_all_types(&generics, &decl, cx); ForeignFunctionItem(Function { decl, generics, header: hir::FnHeader { unsafety: hir::Unsafety::Unsafe, abi, constness: hir::Constness::NotConst, asyncness: hir::IsAsync::NotAsync, }, all_types, ret_types, }) } hir::ForeignItemKind::Static(ref ty, mutability) => ForeignStaticItem(Static { type_: ty.clean(cx), mutability, expr: String::new(), }), hir::ForeignItemKind::Type => ForeignTypeItem, }; Item::from_hir_id_and_parts( item.hir_id, Some(renamed.unwrap_or(item.ident.name)), kind, cx, ) }) } } impl Clean<Item> for (&hir::MacroDef<'_>, Option<Symbol>) { fn clean(&self, cx: &DocContext<'_>) -> Item { let (item, renamed) = self; let name = renamed.unwrap_or(item.ident.name); let tts = item.ast.body.inner_tokens().trees().collect::<Vec<_>>(); // Extract the spans of all matchers. They represent the "interface" of the macro. let matchers = tts.chunks(4).map(|arm| arm[0].span()).collect::<Vec<_>>(); let source = if item.ast.macro_rules { format!( "macro_rules! {} {{\n{}}}", name, matchers .iter() .map(|span| { format!(" {} => {{ ... 
}};\n", span.to_src(cx)) }) .collect::<String>(), ) } else { let vis = item.vis.clean(cx); let def_id = cx.tcx.hir().local_def_id(item.hir_id).to_def_id(); if matchers.len() <= 1 { format!( "{}macro {}{} {{\n ...\n}}", vis.print_with_space(cx.tcx, def_id), name, matchers.iter().map(|span| span.to_src(cx)).collect::<String>(), ) } else { format!( "{}macro {} {{\n{}}}", vis.print_with_space(cx.tcx, def_id), name, matchers .iter() .map(|span| { format!(" {} => {{ ... }},\n", span.to_src(cx)) }) .collect::<String>(), ) } }; Item::from_hir_id_and_parts( item.hir_id, Some(name), MacroItem(Macro { source, imported_from: None }), cx, ) } } impl Clean<TypeBinding> for hir::TypeBinding<'_> { fn clean(&self, cx: &DocContext<'_>) -> TypeBinding { TypeBinding { name: self.ident.name, kind: self.kind.clean(cx) } } } impl Clean<TypeBindingKind> for hir::TypeBindingKind<'_> { fn clean(&self, cx: &DocContext<'_>) -> TypeBindingKind { match *self { hir::TypeBindingKind::Equality { ref ty } => { TypeBindingKind::Equality { ty: ty.clean(cx) } } hir::TypeBindingKind::Constraint { ref bounds } => { TypeBindingKind::Constraint { bounds: bounds.iter().map(|b| b.clean(cx)).collect() } } } } } enum SimpleBound { TraitBound(Vec<PathSegment>, Vec<SimpleBound>, Vec<GenericParamDef>, hir::TraitBoundModifier), Outlives(Lifetime), } impl From<GenericBound> for SimpleBound { fn from(bound: GenericBound) -> Self { match bound.clone() { GenericBound::Outlives(l) => SimpleBound::Outlives(l), GenericBound::TraitBound(t, mod_) => match t.trait_ { Type::ResolvedPath { path, param_names, .. } => SimpleBound::TraitBound( path.segments, param_names.map_or_else(Vec::new, |v| { v.iter().map(|p| SimpleBound::from(p.clone())).collect() }), t.generic_params, mod_, ), _ => panic!("Unexpected bound {:?}", bound), }, } } }
40.477519
129
0.469839
de79301991a9d8e22a3d1401bd9b0cd50076bf17
23,909
// Copyright 2017 Pants project contributors (see CONTRIBUTORS.md). // Licensed under the Apache License, Version 2.0 (see LICENSE). #![deny(warnings)] // Enable all clippy lints except for many of the pedantic ones. It's a shame this needs to be copied and pasted across crates, but there doesn't appear to be a way to include inner attributes from a common source. #![deny( clippy::all, clippy::default_trait_access, clippy::expl_impl_clone_on_copy, clippy::if_not_else, clippy::needless_continue, clippy::unseparated_literal_suffix, clippy::used_underscore_binding )] // It is often more clear to show that nothing is being moved. #![allow(clippy::match_ref_pats)] // Subjective style. #![allow( clippy::len_without_is_empty, clippy::redundant_field_names, clippy::too_many_arguments )] // Default isn't as big a deal as people seem to think it is. #![allow(clippy::new_without_default, clippy::new_ret_no_self)] // Arc<Mutex> can be more clear than needing to grok Orderings: #![allow(clippy::mutex_atomic)] use clap; use env_logger; use fs; use rand; use serde_json; use boxfuture::{BoxFuture, Boxable}; use bytes::Bytes; use clap::{value_t, App, Arg, SubCommand}; use fs::GlobMatching; use futures::compat::Future01CompatExt; use futures::future::TryFutureExt; use futures01::{future, Future}; use hashing::{Digest, Fingerprint}; use parking_lot::Mutex; use protobuf::Message; use rand::seq::SliceRandom; use serde_derive::Serialize; use std::io::{self, Write}; use std::path::{Path, PathBuf}; use std::process::exit; use std::sync::Arc; use std::time::Duration; use store::{Snapshot, Store, StoreFileByDigest, UploadSummary}; use tokio::runtime::Handle; #[derive(Debug)] enum ExitCode { UnknownError = 1, NotFound = 2, } #[derive(Debug)] struct ExitError(String, ExitCode); impl From<String> for ExitError { fn from(s: String) -> Self { ExitError(s, ExitCode::UnknownError) } } #[derive(Serialize)] struct SummaryWithDigest { digest: Digest, summary: Option<UploadSummary>, } #[tokio::main] async fn main() { env_logger::init(); match execute( &App::new("fs_util") .subcommand( SubCommand::with_name("file") .subcommand( SubCommand::with_name("cat") .about("Output the contents of a file by fingerprint.") .arg(Arg::with_name("fingerprint").required(true).takes_value( true, )) .arg(Arg::with_name("size_bytes").required(true).takes_value( true, )), ) .subcommand( SubCommand::with_name("save") .about( "Ingest a file by path, which allows it to be used in Directories/Snapshots. \ Outputs a fingerprint of its contents and its size in bytes, separated by a space.", ) .arg(Arg::with_name("path").required(true).takes_value(true)) .arg(Arg::with_name("output-mode").long("output-mode").possible_values(&["json", "simple"]).default_value("simple").multiple(false).takes_value(true).help( "Set to manipulate the way a report is displayed." )), ), ) .subcommand( SubCommand::with_name("directory") .subcommand( SubCommand::with_name("materialize") .about( "Materialize a directory by fingerprint to the filesystem. \ Destination must not exist before this command is run.", ) .arg(Arg::with_name("fingerprint").required(true).takes_value( true, )) .arg(Arg::with_name("size_bytes").required(true).takes_value( true, )) .arg(Arg::with_name("destination").required(true).takes_value( true, )), ) .subcommand( SubCommand::with_name("save") .about( "Ingest a directory recursively. Saves all files found therein and saves Directory \ protos for each directory found. 
Outputs a fingerprint of the canonical top-level Directory proto \ and the size of the serialized proto in bytes, separated by a space.", ) .arg( Arg::with_name("globs") .required(true) .takes_value(true) .multiple(true) .help( "globs matching the files and directories which should be included in the \ directory, relative to the root.", ), ) .arg(Arg::with_name("root").long("root").required(true).takes_value(true).help( "Root under which the globs live. The Directory proto produced will be relative \ to this directory.", )) .arg(Arg::with_name("output-mode").long("output-mode").possible_values(&["json", "simple"]).default_value("simple").multiple(false).takes_value(true).help( "Set to manipulate the way a report is displayed." )), ) .subcommand( SubCommand::with_name("cat-proto") .about( "Output the bytes of a serialized Directory proto addressed by fingerprint.", ) .arg( Arg::with_name("output-format") .long("output-format") .takes_value(true) .default_value("binary") .possible_values(&["binary", "recursive-file-list", "recursive-file-list-with-digests", "text"]), ) .arg(Arg::with_name("fingerprint").required(true).takes_value( true, )) .arg(Arg::with_name("size_bytes").required(true).takes_value( true, )), ), ) .subcommand( SubCommand::with_name("cat") .about( "Output the contents of a file or Directory proto addressed by fingerprint.", ) .arg(Arg::with_name("fingerprint").required(true).takes_value( true, )) .arg(Arg::with_name("size_bytes").required(true).takes_value( true, )), ) .subcommand( SubCommand::with_name("directories") .subcommand(SubCommand::with_name("list")) .about("List all directory digests known in the local store") ) .subcommand( SubCommand::with_name("gc") .about("Garbage collect the on-disk store. Note that after running this command, any processes with an open store (e.g. a pantsd) may need to re-initialize their store.") .arg( Arg::with_name("target-size-bytes") .takes_value(true) .long("target-size-bytes") .required(true), ) ) .arg( Arg::with_name("local-store-path") .takes_value(true) .long("local-store-path") .required(false), ) .arg( Arg::with_name("server-address") .takes_value(true) .long("server-address") .required(false) .multiple(true) .number_of_values(1) ) .arg( Arg::with_name("root-ca-cert-file") .help("Path to file containing root certificate authority certificates. If not set, TLS will not be used when connecting to the remote.") .takes_value(true) .long("root-ca-cert-file") .required(false) ) .arg( Arg::with_name("oauth-bearer-token-file") .help("Path to file containing oauth bearer token. If not set, no authorization will be provided to remote servers.") .takes_value(true) .long("oauth-bearer-token-file") .required(false) ) .arg(Arg::with_name("remote-instance-name") .takes_value(true) .long("remote-instance-name") .required(false)) .arg( Arg::with_name("chunk-bytes") .help("Number of bytes to include per-chunk when uploading bytes. 
grpc imposes a hard message-size limit of around 4MB.") .takes_value(true) .long("chunk-bytes") .required(false) .default_value(&format!("{}", 3 * 1024 * 1024)) ).arg( Arg::with_name("thread-count") .help("Number of threads to use for uploads and downloads") .takes_value(true) .long("thread-count") .required(false) .default_value("1") ) .arg( Arg::with_name("rpc-attempts") .help("Number of times to attempt any RPC before giving up.") .takes_value(true) .long("rpc-attempts") .required(false) .default_value("3") ) .arg( Arg::with_name("connection-limit") .help("Number of concurrent servers to allow connections to.") .takes_value(true) .long("connection-limit") .required(false) .default_value("3") ) .get_matches(), ).await { Ok(_) => {} Err(err) => { eprintln!("{}", err.0); exit(err.1 as i32) } }; } // TODO: Sure, it's a bit long... #[allow(clippy::cognitive_complexity)] async fn execute(top_match: &clap::ArgMatches<'_>) -> Result<(), ExitError> { let store_dir = top_match .value_of("local-store-path") .map(PathBuf::from) .unwrap_or_else(Store::default_path); let runtime = task_executor::Executor::new(Handle::current()); let (store, store_has_remote) = { let (store_result, store_has_remote) = match top_match.values_of("server-address") { Some(cas_address) => { let chunk_size = value_t!(top_match.value_of("chunk-bytes"), usize).expect("Bad chunk-bytes flag"); let root_ca_certs = if let Some(path) = top_match.value_of("root-ca-cert-file") { Some( std::fs::read(path) .map_err(|err| format!("Error reading root CA certs file {}: {}", path, err))?, ) } else { None }; let oauth_bearer_token = if let Some(path) = top_match.value_of("oauth-bearer-token-file") { Some(std::fs::read_to_string(path).map_err(|err| { format!("Error reading oauth bearer token from {:?}: {}", path, err) })?) } else { None }; // Randomize CAS address order to avoid thundering herds from common config. let mut cas_addresses = cas_address.map(str::to_owned).collect::<Vec<_>>(); cas_addresses.shuffle(&mut rand::thread_rng()); ( Store::with_remote( runtime.clone(), &store_dir, cas_addresses, top_match .value_of("remote-instance-name") .map(str::to_owned), root_ca_certs, oauth_bearer_token, value_t!(top_match.value_of("thread-count"), usize).expect("Invalid thread count"), chunk_size, // This deadline is really only in place because otherwise DNS failures // leave this hanging forever. // // Make fs_util have a very long deadline (because it's not configurable, // like it is inside pants) until we switch to Tower (where we can more // carefully control specific components of timeouts). // // See https://github.com/pantsbuild/pants/pull/6433 for more context. Duration::from_secs(30 * 60), // TODO: Take a command line arg. 
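          // Retry backoff for remote store RPCs. Reading the values below as
          // (initial interval, growth factor, max interval) is an assumption
          // about the `BackoffConfig::new` argument order: 1s, growing 1.2x
          // per attempt, capped at 20s.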
store::BackoffConfig::new( std::time::Duration::from_secs(1), 1.2, std::time::Duration::from_secs(20), )?, value_t!(top_match.value_of("rpc-attempts"), usize).expect("Bad rpc-attempts flag"), value_t!(top_match.value_of("connection-limit"), usize) .expect("Bad connection-limit flag"), ), true, ) } None => (Store::local_only(runtime.clone(), &store_dir), false), }; let store = store_result.map_err(|e| { format!( "Failed to open/create store for directory {:?}: {}", store_dir, e ) })?; (store, store_has_remote) }; match top_match.subcommand() { ("file", Some(sub_match)) => { match sub_match.subcommand() { ("cat", Some(args)) => { let fingerprint = Fingerprint::from_hex_string(args.value_of("fingerprint").unwrap())?; let size_bytes = args .value_of("size_bytes") .unwrap() .parse::<usize>() .expect("size_bytes must be a non-negative number"); let digest = Digest(fingerprint, size_bytes); let write_result = store .load_file_bytes_with(digest, |bytes| io::stdout().write_all(&bytes).unwrap()) .await?; write_result .ok_or_else(|| { ExitError( format!("File with digest {:?} not found", digest), ExitCode::NotFound, ) }) .map(|((), _metadata)| ()) } ("save", Some(args)) => { let path = PathBuf::from(args.value_of("path").unwrap()); // Canonicalize path to guarantee that a relative path has a parent. let posix_fs = make_posix_fs( runtime.clone(), path .canonicalize() .map_err(|e| format!("Error canonicalizing path {:?}: {:?}", path, e))? .parent() .ok_or_else(|| format!("File being saved must have parent but {:?} did not", path))?, ); let file = posix_fs .stat_sync(PathBuf::from(path.file_name().unwrap())) .unwrap() .ok_or_else(|| format!("Tried to save file {:?} but it did not exist", path))?; match file { fs::Stat::File(f) => { let digest = store::OneOffStoreFileByDigest::new(store.clone(), Arc::new(posix_fs)) .store_by_digest(f) .compat() .await .unwrap(); let report = ensure_uploaded_to_remote(&store, store_has_remote, digest) .compat() .await .unwrap(); print_upload_summary(args.value_of("output-mode"), &report); Ok(()) } o => Err( format!( "Tried to save file {:?} but it was not a file, was a {:?}", path, o ) .into(), ), } } (_, _) => unimplemented!(), } } ("directory", Some(sub_match)) => match sub_match.subcommand() { ("materialize", Some(args)) => { let destination = PathBuf::from(args.value_of("destination").unwrap()); let fingerprint = Fingerprint::from_hex_string(args.value_of("fingerprint").unwrap())?; let size_bytes = args .value_of("size_bytes") .unwrap() .parse::<usize>() .expect("size_bytes must be a non-negative number"); let digest = Digest(fingerprint, size_bytes); store .materialize_directory(destination, digest) .compat() .await .map(|metadata| { eprintln!("{}", serde_json::to_string_pretty(&metadata).unwrap()); }) .map_err(|err| { if err.contains("not found") { ExitError(err, ExitCode::NotFound) } else { err.into() } }) } ("save", Some(args)) => { let posix_fs = Arc::new(make_posix_fs( runtime.clone(), args.value_of("root").unwrap(), )); let store_copy = store.clone(); let paths = posix_fs .expand( fs::PathGlobs::new( args .values_of("globs") .unwrap() .map(str::to_string) .collect::<Vec<String>>(), // By using `Ignore`, we say that we don't care if some globs fail to expand. Is // that a valid assumption? 
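            // (The stricter matching modes would surface unmatched globs as
            // warnings or errors instead of silently dropping them.)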
fs::StrictGlobMatching::Ignore, fs::GlobExpansionConjunction::AllMatch, ) .parse()?, ) .await .map_err(|e| format!("Error expanding globs: {:?}", e))?; let snapshot = Snapshot::from_path_stats( store_copy.clone(), store::OneOffStoreFileByDigest::new(store_copy, posix_fs), paths, ) .await?; let report = ensure_uploaded_to_remote(&store, store_has_remote, snapshot.digest) .compat() .await?; print_upload_summary(args.value_of("output-mode"), &report); Ok(()) } ("cat-proto", Some(args)) => { let fingerprint = Fingerprint::from_hex_string(args.value_of("fingerprint").unwrap())?; let size_bytes = args .value_of("size_bytes") .unwrap() .parse::<usize>() .expect("size_bytes must be a non-negative number"); let digest = Digest(fingerprint, size_bytes); let proto_bytes: Option<Vec<u8>> = match args.value_of("output-format").unwrap() { "binary" => { let maybe_directory = store.load_directory(digest).await?; maybe_directory.map(|(d, _metadata)| d.write_to_bytes().unwrap()) } "text" => { let maybe_p = store.load_directory(digest).await?; maybe_p.map(|(p, _metadata)| format!("{:?}\n", p).as_bytes().to_vec()) } "recursive-file-list" => { let maybe_v = expand_files(store, digest).compat().await?; maybe_v .map(|v| { v.into_iter() .map(|(name, _digest)| format!("{}\n", name)) .collect::<Vec<String>>() .join("") }) .map(String::into_bytes) } "recursive-file-list-with-digests" => { let maybe_v = expand_files(store, digest).compat().await?; maybe_v .map(|v| { v.into_iter() .map(|(name, digest)| format!("{} {} {}\n", name, digest.0, digest.1)) .collect::<Vec<String>>() .join("") }) .map(String::into_bytes) } format => { return Err(format!("Unexpected value of --output-format arg: {}", format).into()) } }; match proto_bytes { Some(bytes) => { io::stdout().write_all(&bytes).unwrap(); Ok(()) } None => Err(ExitError( format!("Directory with digest {:?} not found", digest), ExitCode::NotFound, )), } } (_, _) => unimplemented!(), }, ("cat", Some(args)) => { let fingerprint = Fingerprint::from_hex_string(args.value_of("fingerprint").unwrap())?; let size_bytes = args .value_of("size_bytes") .unwrap() .parse::<usize>() .expect("size_bytes must be a non-negative number"); let digest = Digest(fingerprint, size_bytes); let v = match store.load_file_bytes_with(digest, |bytes| bytes).await? 
{ None => { let maybe_dir = store.load_directory(digest).await?; maybe_dir.map(|(dir, _metadata)| { Bytes::from( dir .write_to_bytes() .expect("Error serializing Directory proto"), ) }) } Some((bytes, _metadata)) => Some(bytes), }; match v { Some(bytes) => { io::stdout().write_all(&bytes).unwrap(); Ok(()) } None => Err(ExitError( format!("Digest {:?} not found", digest), ExitCode::NotFound, )), } } ("directories", Some(sub_match)) => match sub_match.subcommand() { ("list", _) => { for digest in store .all_local_digests(::store::EntryType::Directory) .expect("Error opening store") { println!("{} {}", digest.0, digest.1); } Ok(()) } _ => unimplemented!(), }, ("gc", Some(args)) => { let target_size_bytes = value_t!(args.value_of("target-size-bytes"), usize) .expect("--target-size-bytes must be passed as a non-negative integer"); store.garbage_collect(target_size_bytes, store::ShrinkBehavior::Compact)?; Ok(()) } (_, _) => unimplemented!(), } } fn expand_files( store: Store, digest: Digest, ) -> impl Future<Item = Option<Vec<(String, Digest)>>, Error = String> { let files = Arc::new(Mutex::new(Vec::new())); expand_files_helper(store, digest, String::new(), files.clone()).map(|maybe| { maybe.map(|()| { let mut v = Arc::try_unwrap(files).unwrap().into_inner(); v.sort_by(|(l, _), (r, _)| l.cmp(r)); v }) }) } fn expand_files_helper( store: Store, digest: Digest, prefix: String, files: Arc<Mutex<Vec<(String, Digest)>>>, ) -> BoxFuture<Option<()>, String> { Box::pin(async move { let maybe_dir = store.load_directory(digest).await?; match maybe_dir { Some((dir, _metadata)) => { { let mut files_unlocked = files.lock(); for file in dir.get_files() { let file_digest: Result<Digest, String> = file.get_digest().into(); files_unlocked.push((format!("{}{}", prefix, file.name), file_digest?)); } } let subdirs_and_digests = dir .get_directories() .iter() .map(move |subdir| { let digest: Result<Digest, String> = subdir.get_digest().into(); digest.map(|digest| (subdir, digest)) }) .collect::<Result<Vec<_>, _>>()?; future::join_all( subdirs_and_digests .into_iter() .map(move |(subdir, digest)| { expand_files_helper( store.clone(), digest, format!("{}{}/", prefix, subdir.name), files.clone(), ) }) .collect::<Vec<_>>(), ) .map(|_| Some(())) .compat() .await } None => Ok(None), } }) .compat() .to_boxed() } fn make_posix_fs<P: AsRef<Path>>(executor: task_executor::Executor, root: P) -> fs::PosixFS { // Unwrapping the output of creating the git ignorer with no patterns is infallible. fs::PosixFS::new( &root, fs::GitignoreStyleExcludes::create(vec![]).unwrap(), executor, ) .unwrap() } fn ensure_uploaded_to_remote( store: &Store, store_has_remote: bool, digest: Digest, ) -> impl Future<Item = SummaryWithDigest, Error = String> { let summary = if store_has_remote { store .ensure_remote_has_recursive(vec![digest]) .map(Some) .to_boxed() } else { future::ok(None).to_boxed() }; summary.map(move |summary| SummaryWithDigest { digest, summary }) } fn print_upload_summary(mode: Option<&str>, report: &SummaryWithDigest) { match mode { Some("json") => println!("{}", serde_json::to_string_pretty(&report).unwrap()), Some("simple") => println!("{} {}", report.digest.0, report.digest.1), // This should never be reached, as clap should error with unknown formats. _ => eprintln!("Unknown summary format."), }; }
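// The subcommands above repeat one pattern several times: parse a hex
// fingerprint plus a byte count into a `Digest`. A minimal sketch of that
// pattern factored into a helper, using the same `Fingerprint`/`Digest`
// types as the rest of this file; the helper's name is invented for
// illustration and is not part of the original binary.
#[allow(dead_code)]
fn digest_from_args(fingerprint_hex: &str, size_bytes: &str) -> Result<Digest, String> {
  let fingerprint = Fingerprint::from_hex_string(fingerprint_hex)?;
  let size_bytes = size_bytes
    .parse::<usize>()
    .map_err(|e| format!("size_bytes must be a non-negative number: {}", e))?;
  Ok(Digest(fingerprint, size_bytes))
}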
34.751453
214
0.537789
e655e0558a9200b54d3929a81a6782db4a481af1
2,828
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

//! Binary file to read data from a Parquet file.
//!
//! # Install
//!
//! `parquet-read` can be installed using `cargo`:
//! ```
//! cargo install parquet
//! ```
//! After this `parquet-read` should be globally available:
//! ```
//! parquet-read XYZ.parquet
//! ```
//!
//! The binary can also be built from the source code and run as follows:
//! ```
//! cargo run --bin parquet-read XYZ.parquet
//! ```
//!
//! # Usage
//!
//! ```
//! parquet-read <file-path> [num-records]
//! ```
//! where `file-path` is the path to a Parquet file and `num-records` is an optional
//! numeric argument that specifies the number of records to read from the file.
//! When not provided, all records are read.
//!
//! Note that `parquet-read` reads the full file schema; no projection or filtering is
//! applied.
//!
//! For example,
//! ```
//! parquet-read data/alltypes_plain.snappy.parquet
//!
//! parquet-read data/alltypes_plain.snappy.parquet 4
//! ```

extern crate parquet;

use std::env;
use std::fs::File;
use std::path::Path;
use std::process;

use parquet::file::reader::{FileReader, SerializedFileReader};

fn main() {
  let args: Vec<String> = env::args().collect();
  if args.len() != 2 && args.len() != 3 {
    println!("Usage: parquet-read <file-path> [num-records]");
    process::exit(1);
  }

  let mut num_records: Option<usize> = None;
  if args.len() == 3 {
    match args[2].parse() {
      Ok(value) => num_records = Some(value),
      Err(e) => panic!("Error when reading value for [num-records], {}", e),
    }
  }

  let path = Path::new(&args[1]);
  let file = File::open(&path).unwrap();
  let parquet_reader = SerializedFileReader::new(file).unwrap();

  // Use full schema as projected schema
  let mut iter = parquet_reader.get_row_iter(None).unwrap();
  let mut start = 0;
  let end = num_records.unwrap_or(0);
  let all_records = num_records.is_none();

  while all_records || start < end {
    match iter.next() {
      Some(row) => println!("{}", row),
      None => break,
    }
    start += 1;
  }
}
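// A hedged alternative to the manual counting loop in `main` above, written
// with iterator adapters instead of an explicit counter. It assumes the
// crate re-exports the row type at `parquet::record::Row` and that rows
// print via `Display`, exactly as the loop above relies on; the function
// name is invented for illustration.
#[allow(dead_code)]
fn print_rows(rows: impl Iterator<Item = parquet::record::Row>, num_records: Option<usize>) {
  // `take` with a usize::MAX sentinel reproduces the "read everything when
  // no limit is given" behavior of the original while loop.
  for row in rows.take(num_records.unwrap_or(usize::MAX)) {
    println!("{}", row);
  }
}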
28.857143
85
0.662306
14b3aa7795f8ee7412161e98ce268e8104312698
53
net.sf.jasperreports.charts.xml.JRBubbleChartFactory
26.5
52
0.886792
03d63ab937402c4a29d56e20df3746ce059d4abd
2,810
use ::std::collections::HashMap; use ::timely::dataflow::{Scope, Stream}; use timely::dataflow::channels::pact::Exchange; use timely::dataflow::operators::{Filter, Operator}; use ::event::{Auction, Person}; use {queries::NexmarkInput, queries::NexmarkTimer}; pub fn q3<S: Scope<Timestamp=usize>>(input: &NexmarkInput, _nt: NexmarkTimer, scope: &mut S) -> Stream<S, (String, String, String, usize)> { let auctions = input.auctions(scope) .filter(|a| a.category == 10); let people = input.people(scope) .filter(|p| p.state == "OR" || p.state == "ID" || p.state == "CA"); let mut auctions_buffer = vec![]; let mut people_buffer = vec![]; auctions .binary( &people, Exchange::new(|a: &Auction| a.seller as u64), Exchange::new(|p: &Person| p.id as u64), "Q3 Join", |_capability, _info| { let mut state1 = HashMap::new(); let mut state2 = HashMap::<usize, Person>::new(); move |input1, input2, output| { // Process each input auction. input1.for_each(|time, data| { data.swap(&mut auctions_buffer); let mut session = output.session(&time); for auction in auctions_buffer.drain(..) { if let Some(person) = state2.get(&auction.seller) { session.give(( person.name.clone(), person.city.clone(), person.state.clone(), auction.id)); } state1.entry(auction.seller).or_insert(Vec::new()).push(auction); } }); // Process each input person. input2.for_each(|time, data| { data.swap(&mut people_buffer); let mut session = output.session(&time); for person in people_buffer.drain(..) { if let Some(auctions) = state1.get(&person.id) { for auction in auctions.iter() { session.give(( person.name.clone(), person.city.clone(), person.state.clone(), auction.id)); } } state2.insert(person.id, person); } }); } } ) }
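// For reference, a single-threaded sketch of one half of the symmetric hash
// join performed inside the operator above, stripped of timely timestamps,
// exchange channels, and output sessions. The function name is invented, it
// assumes `Auction::seller`, `Auction::id`, and `Person::id` are `usize`
// (matching the keying above), and the mirror-image person-side step is
// analogous.
#[allow(dead_code)]
fn auction_step(
    pending_auctions: &mut HashMap<usize, Vec<Auction>>,
    people_by_id: &HashMap<usize, Person>,
    auction: Auction,
) -> Option<(String, String, String, usize)> {
    // Probe the person-side state first, then stash the auction so a
    // later-arriving person record can still match it.
    let joined = people_by_id.get(&auction.seller).map(|person| {
        (
            person.name.clone(),
            person.city.clone(),
            person.state.clone(),
            auction.id,
        )
    });
    pending_auctions
        .entry(auction.seller)
        .or_insert_with(Vec::new)
        .push(auction);
    joined
}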
39.577465
138
0.418861
0ac1a643ee447e2a4cb11ef43531152b1f3d778c
4,764
extern crate num_traits; extern crate image; extern crate num_cpus; extern crate scoped_threadpool; #[macro_use(value_t, clap_app, crate_version, crate_authors)] extern crate clap; mod color; mod iter; mod fractal; mod img; mod buffer; mod parallel; pub type Uint = u32; use std::error::Error; use std::path::Path; use img::FractalImage; use image::{save_buffer, Gray, RGBA}; use parallel::Parallel; use std::str::FromStr; enum PixelType { Gray, Rgba } impl FromStr for PixelType { type Err = String; fn from_str(s: &str) -> Result<Self, Self::Err> { if s.eq_ignore_ascii_case("rgba") { Ok(PixelType::Rgba) } else if s.eq_ignore_ascii_case("gray") || s.eq_ignore_ascii_case("grey") { Ok(PixelType::Gray) } else { Err("Expected: gray or rgba pixel type".into()) } } } fn run() -> Result<(), Box<dyn Error>> { let opts = clap_app!((env!("CARGO_PKG_NAME")) => (version: crate_version!()) (author: crate_authors!()) (about: env!("CARGO_PKG_DESCRIPTION")) (@arg FILE: -o --output +takes_value "output file name") (@arg WIDTH: -w --width +takes_value "pixels width") (@arg HEIGHT: -h --height +takes_value "pixels height") (@arg THREADS: -t --threads +takes_value "number of threads") (@arg ITERS: -i --iters +takes_value "number of iterations") (@arg MONO: -m --mono "black and white, no greyscales") (@arg PIXEL: -p --pixel +takes_value "pixel type: gray|rgba") (@arg COORDS: -c --coords +takes_value "x0,y0,x1,y1") ).get_matches(); let filename = &Path::new(opts.value_of("FILE").unwrap_or("mandelbrot.png")); let pixel = opts.value_of("PIXEL").unwrap_or("gray"); let pixel = PixelType::from_str(pixel)?; let width = value_t!(opts, "WIDTH", u32).unwrap_or(700); let height = value_t!(opts, "HEIGHT", u32).unwrap_or(400); let threads = value_t!(opts, "THREADS", u32).unwrap_or_else(|_| num_cpus::get() as u32); let iters = value_t!(opts, "ITERS", Uint).unwrap_or_else(|_| 200 as Uint); let mono = opts.is_present("MONO"); let mut x0: f64 = -2.5; let mut y0: f64 = -1.0; let mut x1: f64 = 1.0; let mut y1: f64 = 1.0; if let Some(coords) = opts.value_of("COORDS") { let mut coords = coords.split(',').map(|v| f64::from_str(v)); if let Some(v) = coords.next() { x0 = v? } if let Some(v) = coords.next() { y0 = v? } if let Some(v) = coords.next() { x1 = v? } if let Some(v) = coords.next() { y1 = v? } } let img = fractal::Fractal::from_view_box(width, height, x0, y0, x1, y1); match threads { 0 | 1 => { match pixel { PixelType::Rgba => { if mono { FractalImage::<image::Rgba<u8>>::to_img_mono(&img, iters).save(filename)?; } else { FractalImage::<image::Rgba<u8>>::to_img_gray(&img, iters).save(filename)?; } }, PixelType::Gray => { if mono { FractalImage::<image::Luma<u8>>::to_img_mono(&img, iters).save(filename)?; } else { FractalImage::<image::Luma<u8>>::to_img_gray(&img, iters).save(filename)?; } } } }, _ => { match pixel { PixelType::Rgba => { let ref buffer = vec_u32_as_vec_u8(Parallel::<u32>::to_img_buffer(&img, iters, mono, threads)); save_buffer(filename, buffer, img.width(), img.height(), RGBA(8))?; }, PixelType::Gray => { let ref buffer = Parallel::<u8>::to_img_buffer(&img, iters, mono, threads); save_buffer(filename, buffer, img.width(), img.height(), Gray(8))?; } } } } Ok(()) } fn vec_u32_as_vec_u8(mut v: Vec<u32>) -> Vec<u8> { use std::mem; let p = v.as_mut_ptr(); let len = v.len(); let cap = v.capacity(); unsafe { // Cast `v` into the void: no destructor run, so we are in // complete control of the allocation to which `p` points. 
mem::forget(v); let len = len * mem::size_of::<u32>() / mem::size_of::<u8>(); let cap = cap * mem::size_of::<u32>() / mem::size_of::<u8>(); // Put everything back together into a Vec Vec::from_raw_parts(p as *mut u8, len, cap) } } fn main() { if let Err(e) = run() { eprintln!("{}", e); ::std::process::exit(1); } }
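// A small, hedged sanity check for `vec_u32_as_vec_u8` above: one u32 must
// reinterpret to exactly four bytes, and on a little-endian target (checked
// at runtime here, since endianness is otherwise an assumption)
// 0x0403_0201 comes out as [1, 2, 3, 4]. Not part of the original program.
#[cfg(test)]
mod reinterpret_tests {
    use super::vec_u32_as_vec_u8;

    #[test]
    fn u32_buffer_reinterprets_in_place() {
        let bytes = vec_u32_as_vec_u8(vec![0x0403_0201u32]);
        assert_eq!(bytes.len(), 4);
        if cfg!(target_endian = "little") {
            assert_eq!(bytes, vec![1u8, 2, 3, 4]);
        }
    }
}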
33.549296
115
0.529807
088148b0300166727e4548bbee3adb5f024665f3
291
use async_trait::async_trait; use crate::prelude::*; #[async_trait] pub trait DataSourceLoader { async fn load_dynamic_data_sources( &self, id: SubgraphDeploymentId, logger: Logger, manifest: SubgraphManifest, ) -> Result<Vec<DataSource>, Error>; }
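// A hedged sketch of the smallest possible implementor of the trait above,
// usable as a test double; the `NullLoader` name and the empty result are
// inventions for this example, not part of the crate.
pub struct NullLoader;

#[async_trait]
impl DataSourceLoader for NullLoader {
    async fn load_dynamic_data_sources(
        &self,
        _id: SubgraphDeploymentId,
        _logger: Logger,
        _manifest: SubgraphManifest,
    ) -> Result<Vec<DataSource>, Error> {
        // No dynamic data sources: always succeed with an empty list.
        Ok(vec![])
    }
}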
20.785714
40
0.66323
4a3f7e59d8c798b063c27ef9091cd1fb5aebb154
1,854
pub(crate) use tracing_core::span::Id; #[derive(Debug)] struct ContextId { id: Id, duplicate: bool, } /// `SpanStack` tracks what spans are currently executing on a thread-local basis. /// /// A "separate current span" for each thread is a semantic choice, as each span /// can be executing in a different thread. #[derive(Debug, Default)] pub(crate) struct SpanStack { stack: Vec<ContextId>, } impl SpanStack { #[inline] pub(super) fn push(&mut self, id: Id) -> bool { let duplicate = self.stack.iter().any(|i| i.id == id); self.stack.push(ContextId { id, duplicate }); !duplicate } #[inline] pub(super) fn pop(&mut self, expected_id: &Id) -> bool { if let Some((idx, _)) = self .stack .iter() .enumerate() .rev() .find(|(_, ctx_id)| ctx_id.id == *expected_id) { let ContextId { id: _, duplicate } = self.stack.remove(idx); return !duplicate; } false } #[inline] pub(crate) fn iter(&self) -> impl Iterator<Item = &Id> { self.stack .iter() .rev() .filter_map(|ContextId { id, duplicate }| if !*duplicate { Some(id) } else { None }) } #[inline] pub(crate) fn current(&self) -> Option<&Id> { self.iter().next() } } #[cfg(test)] mod tests { use super::{Id, SpanStack}; #[test] fn pop_last_span() { let mut stack = SpanStack::default(); let id = Id::from_u64(1); stack.push(id.clone()); assert!(stack.pop(&id)); } #[test] fn pop_first_span() { let mut stack = SpanStack::default(); stack.push(Id::from_u64(1)); stack.push(Id::from_u64(2)); let id = Id::from_u64(1); assert!(stack.pop(&id)); } }
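// One more illustrative case, sketched against the duplicate-tracking
// behavior implemented above: re-entering an id already on the stack makes
// both that push and its matching pop report `false`, while the original
// entry still reports `true` when finally popped.
#[cfg(test)]
mod duplicate_tests {
    use super::{Id, SpanStack};

    #[test]
    fn duplicate_push_and_pop_report_false() {
        let mut stack = SpanStack::default();
        let id = Id::from_u64(1);
        assert!(stack.push(id.clone()));
        // Second push of the same id is flagged as a duplicate.
        assert!(!stack.push(id.clone()));
        // Popping removes the most recent (duplicate) entry first.
        assert!(!stack.pop(&id));
        // The original entry pops normally.
        assert!(stack.pop(&id));
    }
}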
23.769231
96
0.533981
72e3b45dc91b7b8217619deea64da07cc1f63c0a
3,786
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use ast::{MetaItem, Item, Expr, MutMutable}; use codemap::Span; use ext::base::ExtCtxt; use ext::build::AstBuilder; use ext::deriving::generic::*; use ext::deriving::generic::ty::*; use parse::token::InternedString; use ptr::P; pub fn expand_deriving_hash<F>(cx: &mut ExtCtxt, span: Span, mitem: &MetaItem, item: &Item, push: F) where F: FnOnce(P<Item>), { let (path, generics, args) = if cx.ecfg.deriving_hash_type_parameter { (Path::new_(vec!("std", "hash", "Hash"), None, vec!(box Literal(Path::new_local("__S"))), true), LifetimeBounds { lifetimes: Vec::new(), bounds: vec!(("__S", vec!(Path::new(vec!("std", "hash", "Writer"))))), }, Path::new_local("__S")) } else { (Path::new(vec!("std", "hash", "Hash")), LifetimeBounds::empty(), Path::new(vec!("std", "hash", "sip", "SipState"))) }; let inline = cx.meta_word(span, InternedString::new("inline")); let attrs = vec!(cx.attribute(span, inline)); let hash_trait_def = TraitDef { span: span, attributes: Vec::new(), path: path, additional_bounds: Vec::new(), generics: generics, methods: vec!( MethodDef { name: "hash", generics: LifetimeBounds::empty(), explicit_self: borrowed_explicit_self(), args: vec!(Ptr(box Literal(args), Borrowed(None, MutMutable))), ret_ty: nil_ty(), attributes: attrs, combine_substructure: combine_substructure(|a, b, c| { hash_substructure(a, b, c) }) } ) }; hash_trait_def.expand(cx, mitem, item, push); } fn hash_substructure(cx: &mut ExtCtxt, trait_span: Span, substr: &Substructure) -> P<Expr> { let state_expr = match substr.nonself_args { [ref state_expr] => state_expr, _ => cx.span_bug(trait_span, "incorrect number of arguments in `deriving(Hash)`") }; let hash_ident = substr.method_ident; let call_hash = |span, thing_expr| { let expr = cx.expr_method_call(span, thing_expr, hash_ident, vec!(state_expr.clone())); cx.stmt_expr(expr) }; let mut stmts = Vec::new(); let fields = match *substr.fields { Struct(ref fs) => fs, EnumMatching(index, variant, ref fs) => { // Determine the discriminant. We will feed this value to the byte // iteration function. let discriminant = match variant.node.disr_expr { Some(ref d) => d.clone(), None => cx.expr_uint(trait_span, index) }; stmts.push(call_hash(trait_span, discriminant)); fs } _ => cx.span_bug(trait_span, "impossible substructure in `deriving(Hash)`") }; for &FieldInfo { ref self_, span, .. } in fields.iter() { stmts.push(call_hash(span, self_.clone())); } if stmts.len() == 0 { cx.span_bug(trait_span, "#[deriving(Hash)] needs at least one field"); } cx.expr_block(cx.block(trait_span, stmts, None)) }
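// For orientation, a hedged sketch of roughly what this deriving code expands
// to for a two-field struct when `deriving_hash_type_parameter` is enabled
// (the pre-1.0 `Hash<__S>` / `Writer` API that the paths above construct; the
// struct and field names here are invented):
//
//     impl<__S: ::std::hash::Writer> ::std::hash::Hash<__S> for MyStruct {
//         #[inline]
//         fn hash(&self, __arg_0: &mut __S) {
//             self.field_a.hash(__arg_0);
//             self.field_b.hash(__arg_0);
//         }
//     }
//
// For enums, the discriminant computed in `hash_substructure` is hashed
// first, followed by each field of the matched variant.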
35.383178
95
0.564184
1663677c8db8ab2b9cd8f0b25cbc8e411d057345
1,371
struct Node {
    value: i32,
    left: Option<Box<Node>>,
    right: Option<Box<Node>>,
}

impl Node {
    fn new(value: i32) -> Node {
        Node {
            value,
            left: None,
            right: None,
        }
    }

    fn insert(&mut self, value: i32) {
        let new_node = Some(Box::new(Node::new(value)));
        if value < self.value {
            match self.left.as_mut() {
                None => self.left = new_node,
                Some(left) => left.insert(value),
            }
        } else {
            match self.right.as_mut() {
                None => self.right = new_node,
                Some(right) => right.insert(value),
            }
        }
    }

    fn search(&self, target: i32) -> Option<i32> {
        match self.value {
            value if target == value => Some(value),
            value if target < value => self.left.as_ref()?.search(target),
            value if target > value => self.right.as_ref()?.search(target),
            _ => None,
        }
    }
}

fn main() {
    let mut my_tree: Node = Node::new(3);

    for key in vec![2, 4, 0, 8, 11, 18, 22, 16, 12, 7, 10].iter() {
        my_tree.insert(*key);
    }

    match my_tree.search(15) {
        None => println!("not found"),
        Some(val) => println!("{} found", val),
    }
}
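// A hedged companion sketch, not part of the original program: in-order
// traversal of the same `Node` tree, which visits keys in ascending order.
#[allow(dead_code)]
fn in_order(node: &Node, out: &mut Vec<i32>) {
    if let Some(left) = node.left.as_ref() {
        in_order(left, out);
    }
    out.push(node.value);
    if let Some(right) = node.right.as_ref() {
        in_order(right, out);
    }
}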
24.927273
75
0.479942
d6ce4470ccdc141aa3d2bfebbd30a833dd72bc82
3,336
//! Errors returned from failed attempts to convert data. use prost::DecodeError; use std::fmt::{Debug, Display, Formatter}; /// Error thrown when some data received from the wire could not be properly /// converted to a desired Rust type. #[derive(Clone, Debug)] pub struct ConversionError { /// Describes the reason why the conversion failed. pub kind: ConversionErrorKind, /// Debug string of the source value that failed to be converted. pub source: String, /// Name of the target Rust type that the value failed to convert to. pub target_type_name: String, } #[derive(Clone, Debug, Eq, PartialEq)] pub enum ConversionErrorKind { /// When the converter didn't know how to convert one type to another /// because the conversion hasn't been defined. Incompatible, /// When the source value is out of range of the target type. OutOfRange, /// When a required UDT field was not found. FieldNotFound(&'static str), /// When the number of elements in a vector or a tuple /// does not match the expected number of elements. WrongNumberOfItems { actual: usize, expected: usize }, /// When the converter attempted to decode a binary blob, /// but the conversion failed due to invalid data. GrpcDecodeError(DecodeError), } impl ConversionError { fn new<S: Debug, T>(kind: ConversionErrorKind, source: S) -> ConversionError { ConversionError { kind, source: format!("{:?}", source), target_type_name: std::any::type_name::<T>().to_string(), } } pub fn incompatible<S: Debug, T>(source: S) -> ConversionError { Self::new::<S, T>(ConversionErrorKind::Incompatible, source) } pub fn out_of_range<S: Debug, T>(source: S) -> ConversionError { Self::new::<S, T>(ConversionErrorKind::OutOfRange, source) } pub fn field_not_found<S: Debug, T>(source: S, field_name: &'static str) -> ConversionError { Self::new::<S, T>(ConversionErrorKind::FieldNotFound(field_name), source) } pub fn wrong_number_of_items<S: Debug, T>( source: S, actual: usize, expected: usize, ) -> ConversionError { Self::new::<S, T>( ConversionErrorKind::WrongNumberOfItems { actual, expected }, source, ) } pub fn decode_error<S: Debug, T>(source: S, error: DecodeError) -> ConversionError { Self::new::<S, T>(ConversionErrorKind::GrpcDecodeError(error), source) } } impl Display for ConversionError { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { let reason = match &self.kind { ConversionErrorKind::Incompatible => "incompatible types".to_string(), ConversionErrorKind::GrpcDecodeError(e) => format!("gRPC decode error {}", e), ConversionErrorKind::OutOfRange => "value out of range".to_string(), ConversionErrorKind::FieldNotFound(field) => format!("field \"{}\" not found", field), ConversionErrorKind::WrongNumberOfItems { actual, expected } => { format!("expected {} but got {} items", expected, actual) } }; write!( f, "Cannot convert value {} to {}: {}", self.source, self.target_type_name, reason ) } }
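// A hedged usage sketch: how a converter might report an out-of-range
// failure via the constructors above. The `narrow` helper and the i64-to-i8
// narrowing are invented for illustration.
#[allow(dead_code)]
fn narrow(value: i64) -> Result<i8, ConversionError> {
    use std::convert::TryFrom;
    // On overflow, record both the offending source value and the target
    // type name through `out_of_range`'s two type parameters.
    i8::try_from(value).map_err(|_| ConversionError::out_of_range::<i64, i8>(value))
}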
35.489362
98
0.632194
03e518b0d711cf5e53b6f9de2c71c0415ca932df
36,165
/// A convenience macro for loading the YAML file at compile time (relative to the current file, /// like modules work). That YAML object can then be passed to this function. /// /// # Panics /// /// The YAML file must be properly formatted or this function will panic!(). A good way to /// ensure this doesn't happen is to run your program with the `--help` switch. If this passes /// without error, you needn't worry because the YAML is properly formatted. /// /// # Examples /// /// The following example shows how to load a properly formatted YAML file to build an instance /// of an `App` struct. /// /// ```ignore /// # #[macro_use] /// # extern crate clap; /// # use clap::App; /// # fn main() { /// let yml = load_yaml!("app.yml"); /// let app = App::from_yaml(yml); /// /// // continued logic goes here, such as `app.get_matches()` etc. /// # } /// ``` #[cfg(feature = "yaml")] #[macro_export] macro_rules! load_yaml { ($yml:expr) => { &$crate::YamlLoader::load_from_str(include_str!($yml)).expect("failed to load YAML file")[0] }; } /// Convenience macro getting a typed value `T` where `T` implements [`std::str::FromStr`] from an /// argument value. This macro returns a `Result<T,String>` which allows you as the developer to /// decide what you'd like to do on a failed parse. There are two types of errors, parse failures /// and those where the argument wasn't present (such as a non-required argument). You can use /// it to get a single value, or a iterator as with the [`ArgMatches::values_of`] /// /// # Examples /// /// ```no_run /// # #[macro_use] /// # extern crate clap; /// # use clap::App; /// # fn main() { /// let matches = App::new("myapp") /// .arg("[length] 'Set the length to use as a pos whole num, i.e. 20'") /// .get_matches(); /// /// let len = value_t!(matches.value_of("length"), u32).unwrap_or_else(|e| e.exit()); /// let also_len = value_t!(matches, "length", u32).unwrap_or_else(|e| e.exit()); /// /// println!("{} + 2: {}", len, len + 2); /// # } /// ``` /// [`std::str::FromStr`]: https://doc.rust-lang.org/std/str/trait.FromStr.html /// [`ArgMatches::values_of`]: ./struct.ArgMatches.html#method.values_of /// [`Result<T,String>`]: https://doc.rust-lang.org/std/result/enum.Result.html #[macro_export] macro_rules! value_t { ($m:ident, $v:expr, $t:ty) => { $crate::value_t!($m.value_of($v), $t) }; ($m:ident.value_of($v:expr), $t:ty) => { if let Some(v) = $m.value_of($v) { match v.parse::<$t>() { Ok(val) => Ok(val), Err(_) => Err($crate::Error::value_validation_auto(&format!( "The argument '{}' isn't a valid value", v ))), } } else { Err($crate::Error::argument_not_found_auto($v)) } }; } /// Convenience macro getting a typed value `T` where `T` implements [`std::str::FromStr`] or /// exiting upon error, instead of returning a [`Result`] type. /// /// **NOTE:** This macro is for backwards compatibility sake. Prefer /// [`value_t!(/* ... */).unwrap_or_else(|e| e.exit())`] /// /// # Examples /// /// ```no_run /// # #[macro_use] /// # extern crate clap; /// # use clap::App; /// # fn main() { /// let matches = App::new("myapp") /// .arg("[length] 'Set the length to use as a pos whole num, i.e. 20'") /// .get_matches(); /// /// let len = value_t_or_exit!(matches.value_of("length"), u32); /// let also_len = value_t_or_exit!(matches, "length", u32); /// /// println!("{} + 2: {}", len, len + 2); /// # } /// ``` /// [`std::str::FromStr`]: https://doc.rust-lang.org/std/str/trait.FromStr.html /// [`Result`]: https://doc.rust-lang.org/std/result/enum.Result.html /// [`value_t!(/* ... 
*/).unwrap_or_else(|e| e.exit())`]: ./macro.value_t!.html #[macro_export] macro_rules! value_t_or_exit { ($m:ident, $v:expr, $t:ty) => { $crate::value_t_or_exit!($m.value_of($v), $t) }; ($m:ident.value_of($v:expr), $t:ty) => { if let Some(v) = $m.value_of($v) { match v.parse::<$t>() { Ok(val) => val, Err(_) => $crate::Error::value_validation_auto(&format!( "The argument '{}' isn't a valid value", v )) .exit(), } } else { $crate::Error::argument_not_found_auto($v).exit() } }; } /// Convenience macro getting a typed value [`Vec<T>`] where `T` implements [`std::str::FromStr`] /// This macro returns a [`clap::Result<Vec<T>>`] which allows you as the developer to decide /// what you'd like to do on a failed parse. /// /// # Examples /// /// ```no_run /// # #[macro_use] /// # extern crate clap; /// # use clap::App; /// # fn main() { /// let matches = App::new("myapp") /// .arg("[seq]... 'A sequence of pos whole nums, i.e. 20 45'") /// .get_matches(); /// /// let vals = values_t!(matches.values_of("seq"), u32).unwrap_or_else(|e| e.exit()); /// for v in &vals { /// println!("{} + 2: {}", v, v + 2); /// } /// /// let vals = values_t!(matches, "seq", u32).unwrap_or_else(|e| e.exit()); /// for v in &vals { /// println!("{} + 2: {}", v, v + 2); /// } /// # } /// ``` /// [`std::str::FromStr`]: https://doc.rust-lang.org/std/str/trait.FromStr.html /// [`Vec<T>`]: https://doc.rust-lang.org/std/vec/struct.Vec.html /// [`clap::Result<Vec<T>>`]: ./type.Result.html #[macro_export] macro_rules! values_t { ($m:ident, $v:expr, $t:ty) => { $crate::values_t!($m.values_of($v), $t) }; ($m:ident.values_of($v:expr), $t:ty) => { if let Some(vals) = $m.values_of($v) { let mut tmp = vec![]; let mut err = None; for pv in vals { match pv.parse::<$t>() { Ok(rv) => tmp.push(rv), Err(..) => { err = Some($crate::Error::value_validation_auto(&format!( "The argument '{}' isn't a valid value", pv ))); break; } } } match err { Some(e) => Err(e), None => Ok(tmp), } } else { Err($crate::Error::argument_not_found_auto($v)) } }; } /// Convenience macro getting a typed value [`Vec<T>`] where `T` implements [`std::str::FromStr`] /// or exiting upon error. /// /// **NOTE:** This macro is for backwards compatibility sake. Prefer /// [`values_t!(/* ... */).unwrap_or_else(|e| e.exit())`] /// /// # Examples /// /// ```no_run /// # #[macro_use] /// # extern crate clap; /// # use clap::App; /// # fn main() { /// let matches = App::new("myapp") /// .arg("[seq]... 'A sequence of pos whole nums, i.e. 20 45'") /// .get_matches(); /// /// let vals = values_t_or_exit!(matches.values_of("seq"), u32); /// for v in &vals { /// println!("{} + 2: {}", v, v + 2); /// } /// /// // type for example only /// let vals: Vec<u32> = values_t_or_exit!(matches, "seq", u32); /// for v in &vals { /// println!("{} + 2: {}", v, v + 2); /// } /// # } /// ``` /// [`values_t!(/* ... */).unwrap_or_else(|e| e.exit())`]: ./macro.values_t!.html /// [`std::str::FromStr`]: https://doc.rust-lang.org/std/str/trait.FromStr.html /// [`Vec<T>`]: https://doc.rust-lang.org/std/vec/struct.Vec.html #[macro_export] macro_rules! values_t_or_exit { ($m:ident, $v:expr, $t:ty) => { $crate::values_t_or_exit!($m.values_of($v), $t) }; ($m:ident.values_of($v:expr), $t:ty) => { if let Some(vals) = $m.values_of($v) { vals.map(|v| { v.parse::<$t>().unwrap_or_else(|_| { $crate::Error::value_validation_auto(&format!( "One or more arguments aren't valid values" )) .exit() }) }) .collect::<Vec<$t>>() } else { $crate::Error::argument_not_found_auto($v).exit() } }; } // _clap_count_exprs! 
is derived from https://github.com/DanielKeep/rust-grabbag // commit: 82a35ca5d9a04c3b920622d542104e3310ee5b07 // License: MIT // Copyright ⓒ 2015 grabbag contributors. // Licensed under the MIT license (see LICENSE or <http://opensource.org // /licenses/MIT>) or the Apache License, Version 2.0 (see LICENSE of // <http://www.apache.org/licenses/LICENSE-2.0>), at your option. All // files in the project carrying such notice may not be copied, modified, // or distributed except according to those terms. // /// Counts the number of comma-delimited expressions passed to it. The result is a compile-time /// evaluable expression, suitable for use as a static array size, or the value of a `const`. /// /// # Examples /// /// ``` /// # #[macro_use] extern crate clap; /// # fn main() { /// const COUNT: usize = _clap_count_exprs!(a, 5+1, "hi there!".into_string()); /// assert_eq!(COUNT, 3); /// # } /// ``` #[macro_export] macro_rules! _clap_count_exprs { () => { 0 }; ($e:expr) => { 1 }; ($e:expr, $($es:expr),+) => { 1 + $crate::_clap_count_exprs!($($es),*) }; } /// Convenience macro to generate more complete enums with variants to be used as a type when /// parsing arguments. This enum also provides a `variants()` function which can be used to /// retrieve a `Vec<&'static str>` of the variant names, as well as implementing [`FromStr`] and /// [`Display`] automatically. /// /// **NOTE:** Case insensitivity is supported for ASCII characters only. It's highly recommended to /// use [`Arg::case_insensitive(true)`] for args that will be used with these enums /// /// **NOTE:** This macro automatically implements [`std::str::FromStr`] and [`std::fmt::Display`] /// /// **NOTE:** These enums support pub (or not) and uses of the `#[derive()]` traits /// /// # Examples /// /// ```rust /// # #[macro_use] /// # extern crate clap; /// # use clap::{App, Arg}; /// arg_enum!{ /// #[derive(PartialEq, Debug)] /// pub enum Foo { /// Bar, /// Baz, /// Qux /// } /// } /// // Foo enum can now be used via Foo::Bar, or Foo::Baz, etc /// // and implements std::str::FromStr to use with the value_t! macros /// fn main() { /// let m = App::new("app") /// .arg(Arg::from("<foo> 'the foo'") /// .possible_values(&Foo::variants()) /// .case_insensitive(true)) /// .get_matches_from(vec![ /// "app", "baz" /// ]); /// let f = value_t!(m, "foo", Foo).unwrap_or_else(|e| e.exit()); /// /// assert_eq!(f, Foo::Baz); /// } /// ``` /// [`FromStr`]: https://doc.rust-lang.org/std/str/trait.FromStr.html /// [`std::str::FromStr`]: https://doc.rust-lang.org/std/str/trait.FromStr.html /// [`Display`]: https://doc.rust-lang.org/std/fmt/trait.Display.html /// [`std::fmt::Display`]: https://doc.rust-lang.org/std/fmt/trait.Display.html /// [`Arg::case_insensitive(true)`]: ./struct.Arg.html#method.case_insensitive #[macro_export] macro_rules! 
arg_enum { (@as_item $($i:item)*) => ($($i)*); (@impls ( $($tts:tt)* ) -> ($e:ident, $($v:ident),+)) => { $crate::arg_enum!(@as_item $($tts)* impl ::std::str::FromStr for $e { type Err = String; fn from_str(s: &str) -> ::std::result::Result<Self,Self::Err> { match s { $(stringify!($v) | _ if s.eq_ignore_ascii_case(stringify!($v)) => Ok($e::$v)),+, _ => Err({ let v = vec![ $(stringify!($v),)+ ]; format!("valid values: {}", v.join(" ,")) }), } } } impl ::std::fmt::Display for $e { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { match *self { $($e::$v => write!(f, stringify!($v)),)+ } } } impl $e { #[allow(dead_code)] pub fn variants() -> [&'static str; $crate::_clap_count_exprs!($(stringify!($v)),+)] { [ $(stringify!($v),)+ ] } }); }; ($(#[$($m:meta),+])+ pub enum $e:ident { $($v:ident $(=$val:expr)*,)+ } ) => { $crate::arg_enum!(@impls ($(#[$($m),+])+ pub enum $e { $($v$(=$val)*),+ }) -> ($e, $($v),+) ); }; ($(#[$($m:meta),+])+ pub enum $e:ident { $($v:ident $(=$val:expr)*),+ } ) => { $crate::arg_enum!(@impls ($(#[$($m),+])+ pub enum $e { $($v$(=$val)*),+ }) -> ($e, $($v),+) ); }; ($(#[$($m:meta),+])+ enum $e:ident { $($v:ident $(=$val:expr)*,)+ } ) => { $crate::arg_enum!($(#[$($m:meta),+])+ enum $e:ident { $($v:ident $(=$val:expr)*),+ } ); }; ($(#[$($m:meta),+])+ enum $e:ident { $($v:ident $(=$val:expr)*),+ } ) => { $crate::arg_enum!(@impls ($(#[$($m),+])+ enum $e { $($v$(=$val)*),+ }) -> ($e, $($v),+) ); }; (pub enum $e:ident { $($v:ident $(=$val:expr)*,)+ } ) => { $crate::arg_enum!(pub enum $e:ident { $($v:ident $(=$val:expr)*),+ }); }; (pub enum $e:ident { $($v:ident $(=$val:expr)*),+ } ) => { $crate::arg_enum!(@impls (pub enum $e { $($v$(=$val)*),+ }) -> ($e, $($v),+) ); }; (enum $e:ident { $($v:ident $(=$val:expr)*,)+ } ) => { $crate::arg_enum!(enum $e:ident { $($v:ident $(=$val:expr)*),+ }); }; (enum $e:ident { $($v:ident $(=$val:expr)*),+ } ) => { $crate::arg_enum!(@impls (enum $e { $($v$(=$val)*),+ }) -> ($e, $($v),+) ); }; } /// Allows you to pull the version from your Cargo.toml at compile time as /// `MAJOR.MINOR.PATCH_PKGVERSION_PRE` /// /// # Examples /// /// ```no_run /// # #[macro_use] /// # extern crate clap; /// # use clap::App; /// # fn main() { /// let m = App::new("app") /// .version(crate_version!()) /// .get_matches(); /// # } /// ``` #[cfg(not(feature = "no_cargo"))] #[macro_export] macro_rules! crate_version { () => { env!("CARGO_PKG_VERSION") }; } /// Allows you to pull the authors for the app from your Cargo.toml at /// compile time in the form: /// `"author1 lastname <[email protected]>:author2 lastname <[email protected]>"` /// /// You can replace the colons with a custom separator by supplying a /// replacement string, so, for example, /// `crate_authors!(",\n")` would become /// `"author1 lastname <[email protected]>,\nauthor2 lastname <[email protected]>,\nauthor3 lastname <[email protected]>"` /// /// # Examples /// /// ```no_run /// # #[macro_use] /// # extern crate clap; /// # use clap::App; /// # fn main() { /// let m = App::new("app") /// .author(crate_authors!("\n")) /// .get_matches(); /// # } /// ``` #[cfg(not(feature = "no_cargo"))] #[macro_export] macro_rules! 
crate_authors { ($sep:expr) => {{ use std::ops::Deref; use std::boxed::Box; use std::cell::Cell; #[allow(missing_copy_implementations)] #[allow(dead_code)] struct CargoAuthors { authors: Cell<Option<&'static str>>, __private_field: (), }; impl Deref for CargoAuthors { type Target = str; fn deref(&self) -> &'static str { let authors = self.authors.take(); if authors.is_some() { let unwrapped_authors = authors.unwrap(); self.authors.replace(Some(unwrapped_authors)); unwrapped_authors } else { // This caches the result for subsequent invocations of the same instance of the macro // to avoid performing one memory allocation per call. // If performance ever becomes a problem for this code, it should be moved to build.rs let s: Box<String> = Box::new(env!("CARGO_PKG_AUTHORS").replace(':', $sep)); let static_string = Box::leak(s); self.authors.replace(Some(&*static_string)); &*static_string // weird but compiler-suggested way to turn a String into &str } } } &*CargoAuthors { authors: std::cell::Cell::new(Option::None), __private_field: (), } }}; () => { env!("CARGO_PKG_AUTHORS") }; } /// Allows you to pull the description from your Cargo.toml at compile time. /// /// # Examples /// /// ```no_run /// # #[macro_use] /// # extern crate clap; /// # use clap::App; /// # fn main() { /// let m = App::new("app") /// .about(crate_description!()) /// .get_matches(); /// # } /// ``` #[cfg(not(feature = "no_cargo"))] #[macro_export] macro_rules! crate_description { () => { env!("CARGO_PKG_DESCRIPTION") }; } /// Allows you to pull the name from your Cargo.toml at compile time. /// /// # Examples /// /// ```no_run /// # #[macro_use] /// # extern crate clap; /// # use clap::App; /// # fn main() { /// let m = App::new(crate_name!()) /// .get_matches(); /// # } /// ``` #[cfg(not(feature = "no_cargo"))] #[macro_export] macro_rules! crate_name { () => { env!("CARGO_PKG_NAME") }; } /// Allows you to build the `App` instance from your Cargo.toml at compile time. /// /// Equivalent to using the `crate_*!` macros with their respective fields. /// /// Provided separator is for the [`crate_authors!`](macro.crate_authors.html) macro, /// refer to the documentation therefor. /// /// **NOTE:** Changing the values in your `Cargo.toml` does not trigger a re-build automatically, /// and therefore won't change the generated output until you recompile. /// /// **Pro Tip:** In some cases you can "trick" the compiler into triggering a rebuild when your /// `Cargo.toml` is changed by including this in your `src/main.rs` file /// `include_str!("../Cargo.toml");` /// /// # Examples /// /// ```no_run /// # #[macro_use] /// # extern crate clap; /// # fn main() { /// let m = app_from_crate!().get_matches(); /// # } /// ``` #[cfg(not(feature = "no_cargo"))] #[macro_export] macro_rules! app_from_crate { () => { $crate::App::new($crate::crate_name!()) .version($crate::crate_version!()) .author($crate::crate_authors!()) .about($crate::crate_description!()) }; ($sep:expr) => { $crate::App::new($crate::crate_name!()) .version($crate::crate_version!()) .author($crate::crate_authors!($sep)) .about($crate::crate_description!()) }; } /// Build `App`, `Arg`s, ``s and `Group`s with Usage-string like input /// but without the associated parsing runtime cost. /// /// `clap_app!` also supports several shorthand syntaxes. /// /// # Examples /// /// ```no_run /// # #[macro_use] /// # extern crate clap; /// # fn main() { /// let matches = clap_app!(myapp => /// (version: "1.0") /// (author: "Kevin K. 
<[email protected]>") /// (about: "Does awesome things") /// (@arg CONFIG: -c --config +takes_value "Sets a custom config file") /// (@arg INPUT: +required "Sets the input file to use") /// (@arg debug: -d ... "Sets the level of debugging information") /// (@group difficulty => /// (@arg hard: -h --hard "Sets hard mode") /// (@arg normal: -n --normal "Sets normal mode") /// (@arg easy: -e --easy "Sets easy mode") /// ) /// (@subcommand test => /// (about: "controls testing features") /// (version: "1.3") /// (author: "Someone E. <[email protected]>") /// (@arg verbose: -v --verbose "Print test information verbosely") /// ) /// ); /// # } /// ``` /// # Shorthand Syntax for Args /// /// * A single hyphen followed by a character (such as `-c`) sets the [`Arg::short`] /// * A double hyphen followed by a character or word (such as `--config`) sets [`Arg::long`] /// * If one wishes to use a [`Arg::long`] with a hyphen inside (i.e. `--config-file`), you /// must use `--("config-file")` due to limitations of the Rust macro system. /// * Three dots (`...`) sets [`Arg::multiple(true)`] /// * Angled brackets after either a short or long will set [`Arg::value_name`] and /// `Arg::required(true)` such as `--config <FILE>` = `Arg::value_name("FILE")` and /// `Arg::required(true)` /// * Square brackets after either a short or long will set [`Arg::value_name`] and /// `Arg::required(false)` such as `--config [FILE]` = `Arg::value_name("FILE")` and /// `Arg::required(false)` /// * There are short hand syntaxes for Arg methods that accept booleans /// * A plus sign will set that method to `true` such as `+required` = `Arg::required(true)` /// * An exclamation will set that method to `false` such as `!required` = `Arg::required(false)` /// * A `#{min, max}` will set [`Arg::min_values(min)`] and [`Arg::max_values(max)`] /// * An asterisk (`*`) will set `Arg::required(true)` /// * Curly brackets around a `fn` will set [`Arg::validator`] as in `{fn}` = `Arg::validator(fn)` /// * An Arg method that accepts a string followed by square brackets will set that method such as /// `conflicts_with[FOO]` will set `Arg::conflicts_with("FOO")` (note the lack of quotes around /// `FOO` in the macro) /// * An Arg method that takes a string and can be set multiple times (such as /// [`Arg::conflicts_with`]) followed by square brackets and a list of values separated by spaces /// will set that method such as `conflicts_with[FOO BAR BAZ]` will set /// `Arg::conflicts_with("FOO")`, `Arg::conflicts_with("BAR")`, and `Arg::conflicts_with("BAZ")` /// (note the lack of quotes around the values in the macro) /// /// # Shorthand Syntax for Groups /// /// * There are short hand syntaxes for `ArgGroup` methods that accept booleans /// * A plus sign will set that method to `true` such as `+required` = `ArgGroup::required(true)` /// * An exclamation will set that method to `false` such as `!required` = `ArgGroup::required(false)` /// /// # Alternative form for non-ident values /// /// Certain places that normally accept an `ident`, will optionally accept an alternative of `("expr enclosed by parens")` /// * `(@arg something: --something)` could also be `(@arg ("something-else"): --("something-else"))` /// * `(@subcommand something => ...)` could also be `(@subcommand ("something-else") => ...)` /// /// [`Arg::short`]: ./struct.Arg.html#method.short /// [`Arg::long`]: ./struct.Arg.html#method.long /// [`Arg::multiple(true)`]: ./struct.Arg.html#method.multiple /// [`Arg::value_name`]: ./struct.Arg.html#method.value_name /// 
[`Arg::min_values(min)`]: ./struct.Arg.html#method.min_values /// [`Arg::max_values(max)`]: ./struct.Arg.html#method.max_values /// [`Arg::validator`]: ./struct.Arg.html#method.validator /// [`Arg::conflicts_with`]: ./struct.Arg.html#method.conflicts_with #[macro_export] macro_rules! clap_app { (@app ($builder:expr)) => { $builder }; (@app ($builder:expr) (@arg ($name:expr): $($tail:tt)*) $($tt:tt)*) => { $crate::clap_app!{ @app ($builder.arg( $crate::clap_app!{ @arg ($crate::Arg::with_name($name)) (-) $($tail)* })) $($tt)* } }; (@app ($builder:expr) (@arg $name:ident: $($tail:tt)*) $($tt:tt)*) => { $crate::clap_app!{ @app ($builder.arg( $crate::clap_app!{ @arg ($crate::Arg::with_name(stringify!($name))) (-) $($tail)* })) $($tt)* } }; (@app ($builder:expr) (@setting $setting:ident) $($tt:tt)*) => { $crate::clap_app!{ @app ($builder.setting($crate::AppSettings::$setting)) $($tt)* } }; // Treat the application builder as an argument to set its attributes (@app ($builder:expr) (@attributes $($attr:tt)*) $($tt:tt)*) => { $crate::clap_app!{ @app ($crate::clap_app!{ @arg ($builder) $($attr)* }) $($tt)* } }; (@app ($builder:expr) (@group $name:ident => $($tail:tt)*) $($tt:tt)*) => { $crate::clap_app!{ @app ($crate::clap_app!{ @group ($builder, $crate::ArgGroup::with_name(stringify!($name))) $($tail)* }) $($tt)* } }; (@app ($builder:expr) (@group $name:ident !$ident:ident => $($tail:tt)*) $($tt:tt)*) => { $crate::clap_app!{ @app ($crate::clap_app!{ @group ($builder, $crate::ArgGroup::with_name(stringify!($name)).$ident(false)) $($tail)* }) $($tt)* } }; (@app ($builder:expr) (@group $name:ident +$ident:ident => $($tail:tt)*) $($tt:tt)*) => { $crate::clap_app!{ @app ($crate::clap_app!{ @group ($builder, $crate::ArgGroup::with_name(stringify!($name)).$ident(true)) $($tail)* }) $($tt)* } }; // Handle subcommand creation (@app ($builder:expr) (@subcommand $name:ident => $($tail:tt)*) $($tt:tt)*) => { $crate::clap_app!{ @app ($builder.subcommand( $crate::clap_app!{ @app ($crate::App::new(stringify!($name))) $($tail)* } )) $($tt)* } }; (@app ($builder:expr) (@subcommand ($name:expr) => $($tail:tt)*) $($tt:tt)*) => { clap_app!{ @app ($builder.subcommand( $crate::clap_app!{ @app ($crate::App::new($name)) $($tail)* } )) $($tt)* } }; // Yaml like function calls - used for setting various meta directly against the app (@app ($builder:expr) ($ident:ident: $($v:expr),*) $($tt:tt)*) => { // clap_app!{ @app ($builder.$ident($($v),*)) $($tt)* } $crate::clap_app!{ @app ($builder.$ident($($v),*)) $($tt)* } }; // Add members to group and continue argument handling with the parent builder (@group ($builder:expr, $group:expr)) => { $builder.group($group) }; // Treat the group builder as an argument to set its attributes (@group ($builder:expr, $group:expr) (@attributes $($attr:tt)*) $($tt:tt)*) => { $crate::clap_app!{ @group ($builder, $crate::clap_app!{ @arg ($group) (-) $($attr)* }) $($tt)* } }; (@group ($builder:expr, $group:expr) (@arg $name:ident: $($tail:tt)*) $($tt:tt)*) => { $crate::clap_app!{ @group ($crate::clap_app!{ @app ($builder) (@arg $name: $($tail)*) }, $group.arg(stringify!($name))) $($tt)* } }; // No more tokens to munch (@arg ($arg:expr) $modes:tt) => { $arg }; // Shorthand tokens influenced by the usage_string (@arg ($arg:expr) $modes:tt --($long:expr) $($tail:tt)*) => { $crate::clap_app!{ @arg ($arg.long($long)) $modes $($tail)* } }; (@arg ($arg:expr) $modes:tt --$long:ident $($tail:tt)*) => { $crate::clap_app!{ @arg ($arg.long(stringify!($long))) $modes $($tail)* } }; (@arg ($arg:expr) $modes:tt 
-$short:ident $($tail:tt)*) => { $crate::clap_app!{ @arg ($arg.short(stringify!($short).chars().nth(0).unwrap())) $modes $($tail)* } }; (@arg ($arg:expr) (-) <$var:ident> $($tail:tt)*) => { $crate::clap_app!{ @arg ($arg.value_name(stringify!($var))) (+) +takes_value +required $($tail)* } }; (@arg ($arg:expr) (+) <$var:ident> $($tail:tt)*) => { $crate::clap_app!{ @arg ($arg.value_name(stringify!($var))) (+) $($tail)* } }; (@arg ($arg:expr) (-) [$var:ident] $($tail:tt)*) => { $crate::clap_app!{ @arg ($arg.value_name(stringify!($var))) (+) +takes_value $($tail)* } }; (@arg ($arg:expr) (+) [$var:ident] $($tail:tt)*) => { $crate::clap_app!{ @arg ($arg.value_name(stringify!($var))) (+) $($tail)* } }; (@arg ($arg:expr) $modes:tt ... $($tail:tt)*) => { $crate::clap_app!{ @arg ($arg) $modes +multiple $($tail)* } }; // Shorthand magic (@arg ($arg:expr) $modes:tt #{$n:expr, $m:expr} $($tail:tt)*) => { $crate::clap_app!{ @arg ($arg) $modes min_values($n) max_values($m) $($tail)* } }; (@arg ($arg:expr) $modes:tt * $($tail:tt)*) => { $crate::clap_app!{ @arg ($arg) $modes +required $($tail)* } }; // !foo -> .foo(false) (@arg ($arg:expr) $modes:tt !$ident:ident $($tail:tt)*) => { $crate::clap_app!{ @arg ($arg.$ident(false)) $modes $($tail)* } }; // +foo -> .foo(true) (@arg ($arg:expr) $modes:tt +$ident:ident $($tail:tt)*) => { $crate::clap_app!{ @arg ($arg.$ident(true)) $modes $($tail)* } }; // Validator (@arg ($arg:expr) $modes:tt {$fn_:expr} $($tail:tt)*) => { $crate::clap_app!{ @arg ($arg.validator($fn_)) $modes $($tail)* } }; (@as_expr $expr:expr) => { $expr }; // Help (@arg ($arg:expr) $modes:tt $desc:tt) => { $arg.help(clap_app!{ @as_expr $desc }) }; // Handle functions that need to be called multiple times for each argument (@arg ($arg:expr) $modes:tt $ident:ident[$($target:ident)*] $($tail:tt)*) => { $crate::clap_app!{ @arg ($arg $( .$ident(stringify!($target)) )*) $modes $($tail)* } }; // Inherit builder's functions (@arg ($arg:expr) $modes:tt $ident:ident($($expr:expr)*) $($tail:tt)*) => { $crate::clap_app!{ @arg ($arg.$ident($($expr)*)) $modes $($tail)* } }; // Build a subcommand outside of an app. (@subcommand $name:ident => $($tail:tt)*) => { $crate::clap_app!{ @app ($crate::App::new(stringify!($name))) $($tail)* } }; (@subcommand ($name:expr) => $($tail:tt)*) => { $crate::clap_app!{ @app ($crate::App::new($name)) $($tail)* } }; // Start the magic (($name:expr) => $($tail:tt)*) => {{ $crate::clap_app!{ @app ($crate::App::new($name)) $($tail)*} }}; ($name:ident => $($tail:tt)*) => {{ $crate::clap_app!{ @app ($crate::App::new(stringify!($name))) $($tail)*} }}; } macro_rules! impl_settings { ($n:ident, $($v:ident => $c:path),+) => { pub fn set(&mut self, s: $n) { match s { $($n::$v => self.0.insert($c)),+ } } pub fn unset(&mut self, s: $n) { match s { $($n::$v => self.0.remove($c)),+ } } pub fn is_set(&self, s: $n) -> bool { match s { $($n::$v => self.0.contains($c)),+ } } }; } // Convenience for writing to stderr thanks to https://github.com/BurntSushi macro_rules! wlnerr( ($($arg:tt)*) => ({ use std::io::{Write, stderr}; writeln!(&mut stderr(), $($arg)*).ok(); }) ); #[cfg(feature = "debug")] #[cfg_attr(feature = "debug", macro_use)] #[cfg_attr(feature = "debug", allow(unused_macros))] mod debug_macros { macro_rules! debugln { ($fmt:expr) => (println!(concat!("DEBUG:clap:", $fmt))); ($fmt:expr, $($arg:tt)*) => (println!(concat!("DEBUG:clap:",$fmt), $($arg)*)); } macro_rules! sdebugln { ($fmt:expr) => (println!($fmt)); ($fmt:expr, $($arg:tt)*) => (println!($fmt, $($arg)*)); } macro_rules! 
debug { ($fmt:expr) => (print!(concat!("DEBUG:clap:", $fmt))); ($fmt:expr, $($arg:tt)*) => (print!(concat!("DEBUG:clap:",$fmt), $($arg)*)); } macro_rules! sdebug { ($fmt:expr) => (print!($fmt)); ($fmt:expr, $($arg:tt)*) => (print!($fmt, $($arg)*)); } } #[cfg(not(feature = "debug"))] #[cfg_attr(not(feature = "debug"), macro_use)] mod debug_macros { macro_rules! debugln { ($fmt:expr) => {}; ($fmt:expr, $($arg:tt)*) => {}; } macro_rules! sdebugln { ($fmt:expr) => {}; ($fmt:expr, $($arg:tt)*) => {}; } macro_rules! debug { ($fmt:expr) => {}; ($fmt:expr, $($arg:tt)*) => {}; } } // Helper/deduplication macro for printing the correct number of spaces in help messages // used in: // src/args/arg_builder/*.rs // src/app/mod.rs macro_rules! write_nspaces { ($dst:expr, $num:expr) => {{ debugln!("write_spaces!: num={}", $num); for _ in 0..$num { $dst.write_all(b" ")?; } }}; } macro_rules! flags { ($app:expr, $how:ident) => {{ $app.args .args .$how() .filter(|a| { !a.settings.is_set(crate::build::ArgSettings::TakesValue) && a.index.is_none() }) .filter(|a| !a.help_heading.is_some()) }}; ($app:expr) => { flags!($app, iter) }; } #[allow(unused_macros)] macro_rules! flags_mut { ($app:expr) => { $crate::flags!($app, iter_mut) }; } macro_rules! opts { ($app:expr, $how:ident) => {{ $app.args .args .$how() .filter(|a| { a.settings.is_set(crate::build::ArgSettings::TakesValue) && a.index.is_none() }) .filter(|a| !a.help_heading.is_some()) }}; ($app:expr) => { opts!($app, iter) }; } #[allow(unused_macros)] macro_rules! opts_mut { ($app:expr) => { opts!($app, iter_mut) }; } macro_rules! positionals { ($app:expr) => { $app.args .args .iter() .filter(|a| !(a.short.is_some() || a.long.is_some())) }; } #[allow(unused_macros)] macro_rules! positionals_mut { ($app:expr) => { $app.args .values_mut() .filter(|a| !(a.short.is_some() || a.long.is_some())) }; } #[allow(unused_macros)] macro_rules! custom_headings_mut { ($app:expr) => { custom_headings!($app, values_mut) }; } macro_rules! subcommands_cloned { ($app:expr, $how:ident) => { $app.subcommands.$how().cloned() }; ($app:expr) => { subcommands_cloned!($app, iter) }; } macro_rules! subcommands { ($app:expr, $how:ident) => { $app.subcommands.$how() }; ($app:expr) => { subcommands!($app, iter) }; } macro_rules! subcommands_mut { ($app:expr) => { subcommands!($app, iter_mut) }; } macro_rules! groups_for_arg { ($app:expr, $grp:expr) => {{ debugln!("Parser::groups_for_arg: name={}", $grp); $app.groups .iter() .filter(|grp| grp.args.iter().any(|&a| a == $grp)) .map(|grp| grp.id) }}; } macro_rules! find_subcmd_cloned { ($_self:expr, $sc:expr) => {{ subcommands_cloned!($_self).find(|a| match_alias!(a, $sc, &*a.name)) }}; } macro_rules! find_subcmd { ($app:expr, $sc:expr) => {{ subcommands!($app).find(|a| match_alias!(a, $sc, &*a.name)) }}; } macro_rules! longs { ($app:expr) => {{ use crate::mkeymap::KeyType; $app.args.keys.iter().map(|x| &x.key).filter_map(|a| { if let KeyType::Long(v) = a { Some(v) } else { None } }) }}; } macro_rules! _names { (@args $app:expr) => {{ $app.args.args.iter().map(|a| &*a.name) }}; (@sc $app:expr) => {{ $app.subcommands.iter().map(|s| &*s.name).chain( $app.subcommands .iter() .filter(|s| s.aliases.is_some()) .flat_map(|s| s.aliases.as_ref().unwrap().iter().map(|&(n, _)| n)), ) }}; } macro_rules! sc_names { ($app:expr) => {{ _names!(@sc $app) }}; } macro_rules! match_alias { ($a:expr, $to:expr, $what:expr) => {{ $what == $to || ($a.aliases.is_some() && $a .aliases .as_ref() .unwrap() .iter() .any(|alias| alias.0 == $to)) }}; }
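// A compact, hedged illustration tying several of the shorthands documented
// above together; the app and argument names are invented for the example:
//
//     let m = clap_app!(demo =>
//         (version: "0.1")
//         (@arg verbose: -v --verbose ... "Verbosity level")
//         (@arg INPUT: +required "Input file")
//     )
//     .get_matches_from(vec!["demo", "-vv", "input.txt"]);
//
//     assert_eq!(m.occurrences_of("verbose"), 2);
//     assert_eq!(m.value_of("INPUT"), Some("input.txt"));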
33.86236
128
0.513701
db451ae4479fbf233409910a41b9fec8320078a3
5,563
use super::*;
use crate::common_description::*;
use crate::media_description::*;
use crate::session_description::*;

fn get_test_session_description() -> SessionDescription {
    return SessionDescription {
        media_descriptions: vec![MediaDescription {
            media_name: MediaName {
                media: "video".to_string(),
                port: RangedPort {
                    value: 51372,
                    range: None,
                },
                protos: vec!["RTP".to_string(), "AVP".to_string()],
                formats: vec![
                    "120".to_string(),
                    "121".to_string(),
                    "126".to_string(),
                    "97".to_string(),
                ],
            },
            attributes: vec![
                Attribute::new(
                    "fmtp:126 profile-level-id=42e01f;level-asymmetry-allowed=1;packetization-mode=1".to_string(),
                    None,
                ),
                Attribute::new(
                    "fmtp:97 profile-level-id=42e01f;level-asymmetry-allowed=1".to_string(),
                    None,
                ),
                Attribute::new("fmtp:120 max-fs=12288;max-fr=60".to_string(), None),
                Attribute::new("fmtp:121 max-fs=12288;max-fr=60".to_string(), None),
                Attribute::new("rtpmap:120 VP8/90000".to_string(), None),
                Attribute::new("rtpmap:121 VP9/90000".to_string(), None),
                Attribute::new("rtpmap:126 H264/90000".to_string(), None),
                Attribute::new("rtpmap:97 H264/90000".to_string(), None),
                Attribute::new("rtcp-fb:97 ccm fir".to_string(), None),
                Attribute::new("rtcp-fb:97 nack".to_string(), None),
                Attribute::new("rtcp-fb:97 nack pli".to_string(), None),
            ],
            ..Default::default()
        }],
        ..Default::default()
    };
}

#[test]
fn test_get_payload_type_for_vp8() -> Result<(), Error> {
    let tests = vec![
        (
            Codec {
                name: "VP8".to_string(),
                ..Default::default()
            },
            120,
        ),
        (
            Codec {
                name: "VP9".to_string(),
                ..Default::default()
            },
            121,
        ),
        (
            Codec {
                name: "H264".to_string(),
                fmtp: "profile-level-id=42e01f;level-asymmetry-allowed=1".to_string(),
                ..Default::default()
            },
            97,
        ),
        (
            Codec {
                name: "H264".to_string(),
                fmtp: "level-asymmetry-allowed=1;profile-level-id=42e01f".to_string(),
                ..Default::default()
            },
            97,
        ),
        (
            Codec {
                name: "H264".to_string(),
                fmtp: "profile-level-id=42e01f;level-asymmetry-allowed=1;packetization-mode=1"
                    .to_string(),
                ..Default::default()
            },
            126,
        ),
    ];

    for (codec, expected) in tests {
        let sdp = get_test_session_description();
        let actual = sdp.get_payload_type_for_codec(&codec)?;
        assert_eq!(actual, expected);
    }

    Ok(())
}

#[test]
fn test_get_codec_for_payload_type() -> Result<(), Error> {
    let tests: Vec<(u8, Codec)> = vec![
        (
            120,
            Codec {
                payload_type: 120,
                name: "VP8".to_string(),
                clock_rate: 90000,
                fmtp: "max-fs=12288;max-fr=60".to_string(),
                ..Default::default()
            },
        ),
        (
            121,
            Codec {
                payload_type: 121,
                name: "VP9".to_string(),
                clock_rate: 90000,
                fmtp: "max-fs=12288;max-fr=60".to_string(),
                ..Default::default()
            },
        ),
        (
            126,
            Codec {
                payload_type: 126,
                name: "H264".to_string(),
                clock_rate: 90000,
                fmtp: "profile-level-id=42e01f;level-asymmetry-allowed=1;packetization-mode=1"
                    .to_string(),
                ..Default::default()
            },
        ),
        (
            97,
            Codec {
                payload_type: 97,
                name: "H264".to_string(),
                clock_rate: 90000,
                fmtp: "profile-level-id=42e01f;level-asymmetry-allowed=1".to_string(),
                rtcp_feedback: vec![
                    "ccm fir".to_string(),
                    "nack".to_string(),
                    "nack pli".to_string(),
                ],
                ..Default::default()
            },
        ),
    ];

    for (payload_type, expected) in &tests {
        let sdp = get_test_session_description();
        let actual = sdp.get_codec_for_payload_type(*payload_type)?;
        assert_eq!(actual, *expected);
    }

    Ok(())
}

#[test]
fn test_new_session_id() -> Result<(), Error> {
    let mut min = 0x7FFFFFFFFFFFFFFFu64;
    let mut max = 0u64;
    for _ in 0..10000 {
        let r = new_session_id();
        if r > (1 << 63) - 1 {
            assert!(false, "Session ID must be less than 2**64-1, got {}", r)
        }
        if r < min {
            min = r
        }
        if r > max {
            max = r
        }
    }
    if min > 0x1000000000000000 {
        assert!(false, "Value around lower boundary was not generated")
    }
    if max < 0x7000000000000000 {
        assert!(false, "Value around upper boundary was not generated")
    }
    Ok(())
}
31.252809
136
0.459285
9c8a4b5ae6a2d355dfe97a9e2d19a023236893fa
1,091
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

struct CustomAutoRooterVFTable {
    trace: unsafe extern "C" fn(this: *mut i32, trc: *mut u32),
}

unsafe trait CustomAutoTraceable: Sized {
    const vftable: CustomAutoRooterVFTable = CustomAutoRooterVFTable {
        trace: Self::trace,
    };

    unsafe extern "C" fn trace(this: *mut i32, trc: *mut u32) {
        let this = this as *const Self;
        let this = this.as_ref().unwrap();
        Self::do_trace(this, trc);
    }

    fn do_trace(&self, trc: *mut u32);
}

unsafe impl CustomAutoTraceable for () {
    fn do_trace(&self, _: *mut u32) {
        // nop
    }
}

fn main() {
    let _ = <()>::vftable;
}
28.710526
70
0.661778
9c8db34f441a194fecfae5db73849f68bb3d5eb8
7,427
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

#![allow(clippy::len_zero)]

use crate::{
    asan::{add_asan_log_env, check_asan_path, AsanLog},
    cmd::run_cmd,
    expand::Expand,
};
use anyhow::{Error, Result};
use std::{collections::HashMap, path::Path, time::Duration};
use tempfile::tempdir;

const DEFAULT_TIMEOUT_SECS: u64 = 5;
const CRASH_SITE_UNAVAILABLE: &str = "<crash site unavailable>";

pub struct Tester<'a> {
    exe_path: &'a Path,
    arguments: &'a [String],
    environ: &'a HashMap<String, String>,
    timeout: Duration,
    check_asan_log: bool,
    check_debugger: bool,
    check_retry_count: u64,
}

#[derive(Debug)]
pub struct Crash {
    pub call_stack: Vec<String>,
    pub crash_type: String,
    pub crash_site: String,
}

#[derive(Debug)]
pub struct TestResult {
    pub crash: Option<Crash>,
    pub asan_log: Option<AsanLog>,
    pub error: Option<Error>,
}

impl<'a> Tester<'a> {
    pub fn new(
        exe_path: &'a Path,
        arguments: &'a [String],
        environ: &'a HashMap<String, String>,
        timeout: &'a Option<u64>,
        check_asan_log: bool,
        check_debugger: bool,
        check_retry_count: u64,
    ) -> Self {
        let timeout = Duration::from_secs(timeout.unwrap_or(DEFAULT_TIMEOUT_SECS));

        Self {
            exe_path,
            arguments,
            environ,
            timeout,
            check_asan_log,
            check_debugger,
            check_retry_count,
        }
    }

    #[cfg(target_os = "windows")]
    async fn test_input_debugger(
        &self,
        argv: Vec<String>,
        env: HashMap<String, String>,
    ) -> Result<Option<Crash>> {
        const IGNORE_FIRST_CHANCE_EXCEPTIONS: bool = true;
        let report = input_tester::crash_detector::test_process(
            self.exe_path,
            &argv,
            &env,
            self.timeout,
            IGNORE_FIRST_CHANCE_EXCEPTIONS,
            None,
        )?;

        let crash = if let Some(exception) = report.exceptions.last() {
            let call_stack: Vec<_> = exception
                .stack_frames
                .iter()
                .map(|f| f.to_string())
                .collect();

            let crash_site = if let Some(frame) = call_stack.iter().next() {
                frame.to_string()
            } else {
                CRASH_SITE_UNAVAILABLE.to_owned()
            };

            let crash_type = exception.description.to_string();

            Some(Crash {
                call_stack,
                crash_type,
                crash_site,
            })
        } else {
            bail!("{}", report.exit_status);
        };

        Ok(crash)
    }

    #[cfg(target_os = "linux")]
    async fn test_input_debugger(
        &self,
        mut argv: Vec<String>,
        env: HashMap<String, String>,
    ) -> Result<Option<Crash>> {
        argv.insert(0, self.exe_path.display().to_string());

        let (sender, receiver) = std::sync::mpsc::channel();

        // Create two async tasks: one off-thread task for the blocking triage run,
        // and one task that will kill the triage target if we time out.
        let triage = tokio::task::spawn_blocking(move || {
            // Spawn a triage run, but stop it before execing.
            //
            // This calls a blocking `wait()` internally, on the forked child.
            let triage = crate::triage::TriageCommand::new(argv, env)?;

            // Share the new child ID with main thread.
            sender.send(triage.pid())?;

            // The target run is blocking, and may hang.
            triage.run()
        });

        // Save the new process ID of the spawned triage target, so we can try to kill
        // the (possibly hung) target out-of-band, if we time out.
        let target_pid = receiver.recv()?;

        let timeout = tokio::time::timeout(self.timeout, triage).await;

        let crash = if timeout.is_err() {
            use nix::sys::signal::{kill, Signal};

            // Yes. Try to kill the target process, if hung.
            kill(target_pid, Signal::SIGKILL)?;
            bail!("process timed out");
        } else {
            let report = timeout???;

            if let Some(crash) = report.crashes.last() {
                let crash_thread = crash
                    .threads
                    .get(&crash.tid.as_raw())
                    .ok_or_else(|| anyhow!("no thread info for crash thread ID = {}", crash.tid))?;

                let call_stack: Vec<_> = crash_thread
                    .callstack
                    .iter()
                    .enumerate()
                    .map(|(idx, frame)| format!("#{} {}", idx, frame))
                    .collect();

                let crash_type = crash.signal.to_string();

                let crash_site = if let Some(frame) = crash_thread.callstack.get(0) {
                    frame.to_string()
                } else {
                    CRASH_SITE_UNAVAILABLE.to_owned()
                };

                Some(Crash {
                    call_stack,
                    crash_type,
                    crash_site,
                })
            } else {
                None
            }
        };

        Ok(crash)
    }

    pub async fn test_input(&self, input_file: impl AsRef<Path>) -> Result<TestResult> {
        let asan_dir = if self.check_asan_log {
            Some(tempdir()?)
        } else {
            None
        };

        let (argv, env) = {
            let mut expand = Expand::new();
            expand
                .input(input_file)
                .target_exe(&self.exe_path)
                .target_options(&self.arguments);

            let argv = expand.evaluate(&self.arguments)?;
            let mut env: HashMap<String, String> = HashMap::new();
            for (k, v) in self.environ {
                env.insert(k.clone(), expand.evaluate_value(v)?);
            }

            if let Some(asan_dir) = &asan_dir {
                add_asan_log_env(&mut env, asan_dir.path());
            }

            (argv, env)
        };

        let mut crash = None;
        let mut error = None;
        let mut asan_log = None;

        let attempts = 1 + self.check_retry_count;
        for _ in 0..attempts {
            let result = if self.check_debugger {
                match self.test_input_debugger(argv.clone(), env.clone()).await {
                    Ok(crash) => (crash, None),
                    Err(error) => (None, Some(error)),
                }
            } else {
                match run_cmd(self.exe_path, argv.clone(), &env, self.timeout).await {
                    Ok(_) => (None, None),
                    Err(error) => (None, Some(error)),
                }
            };

            crash = result.0;
            error = result.1;
            asan_log = if let Some(asan_dir) = &asan_dir {
                check_asan_path(asan_dir.path()).await?
            } else {
                None
            };

            if crash.is_some() || asan_log.is_some() {
                break;
            }
        }

        Ok(TestResult {
            crash,
            asan_log,
            error,
        })
    }

    pub async fn is_crash(&self, input_file: impl AsRef<Path>) -> Result<bool> {
        let test_result = self.test_input(input_file).await?;
        Ok(test_result.crash.is_some() || test_result.asan_log.is_some())
    }
}
29.708
99
0.507069
0a0aae9d1d8c2bb794aaf92fee6204a41c06d115
3,057
use super::*;

/// https://webassembly.github.io/spec/core/syntax/types.html#function-types
#[derive(Debug, Clone)]
pub struct FunctionType {
    pub param_types: ParamsType,
    pub return_types: ResultType,
}

impl FunctionType {
    pub const ID: u32 = 0x60;

    /// Function id
    pub fn id() -> u32 {
        Self::ID
    }

    // [0x60, cnt, .., cnt, .., 0x60, cnt, .., cnt, ..]
    pub(crate) fn from_bytes(bytes: &[u8]) -> Self {
        // the most simple function like this => func() -> (), no param, no return
        // [0x60, 0, 0]
        assert!(bytes.len() >= 3);
        assert_eq!(bytes[0] as u32, Self::ID);

        // params
        let params_count = bytes[1] as usize;
        let param_types = {
            if params_count == 0 {
                vec![]
            } else {
                let mut param_types = Vec::with_capacity(params_count);
                let params_bytes = &bytes[2..2 + params_count];
                for ty in params_bytes {
                    let param = ValueType::NumType(NumberType::from(*ty));
                    param_types.push(param);
                }
                param_types
            }
        };

        // returns
        let return_bytes = &bytes[1 + 1 + params_count..];
        let return_count = return_bytes[0] as usize;
        let return_types = {
            if return_count == 0 {
                vec![]
            } else {
                let mut return_types = Vec::with_capacity(return_count);
                for ty in &return_bytes[1..] {
                    let _return = ValueType::NumType(NumberType::from(*ty));
                    return_types.push(_return);
                }
                return_types
            }
        };

        Self {
            param_types,
            return_types,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn function_no_param_no_return_should_work() {
        let func_bytes = [0x60, 0, 0u8];
        let func = FunctionType::from_bytes(&func_bytes);
        assert_eq!(func.param_types.len(), 0);
        assert_eq!(func.return_types.len(), 0);
    }

    #[test]
    fn function_no_param_one_return_should_work() {
        let func_bytes = [0x60, 0, 1, 0x7c];
        let func = FunctionType::from_bytes(&func_bytes);
        assert_eq!(func.param_types.len(), 0);
        assert_eq!(func.return_types.len(), 1);
    }

    #[test]
    fn function_one_param_no_return_should_work() {
        let func_bytes = [0x60, 1, 0x7c, 0];
        let func = FunctionType::from_bytes(&func_bytes);
        assert_eq!(func.param_types.len(), 1);
        assert_eq!(func.return_types.len(), 0);
    }

    #[test]
    fn function_four_params_four_return_should_work() {
        // In the latest WebAssembly spec, the multi-value feature is stabilized.
        let func_bytes = [0x60, 4, 0x7c, 0x7d, 0x7e, 0x7f, 4, 0x7c, 0x7d, 0x7e, 0x7f];
        let func = FunctionType::from_bytes(&func_bytes);
        assert_eq!(func.param_types.len(), 4);
        assert_eq!(func.return_types.len(), 4);
    }
}
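// Worked example (an illustrative addition, not part of the original file): the
// byte sequence below encodes `func (param f64 f32) (result i32)` under the
// layout sketched above: tag, param count, param type codes, result count,
// result type codes. The type codes follow the wasm spec (0x7f = i32,
// 0x7e = i64, 0x7d = f32, 0x7c = f64).
#[cfg(test)]
mod layout_sketch {
    use super::*;

    #[test]
    fn two_params_one_return_should_work() {
        // 0x60 = function type tag, 2 params (0x7c = f64, 0x7d = f32),
        // 1 return (0x7f = i32).
        let func_bytes = [0x60, 2, 0x7c, 0x7d, 1, 0x7f];
        let func = FunctionType::from_bytes(&func_bytes);
        assert_eq!(func.param_types.len(), 2);
        assert_eq!(func.return_types.len(), 1);
    }
}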
29.114286
86
0.539091
c17b18d301f536ed44308973ca4f697613539bfa
629
// Copyright 2022 tison <[email protected]>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

mod liveness;
mod string;
37
75
0.750397
6af462ae1c043e383c9d9f7194e3b40765a428bd
11,535
use ark_bls12_381::Fr;
use ark_poly::{univariate::DensePolynomial as DensePoly, EvaluationDomain, GeneralEvaluationDomain};
use num_traits::identities::{One, Zero};
use ark_poly::UVPolynomial;
use ark_poly::Polynomial;

// The quotient polynomial will encode the four checks for the multiset equality argument
// These checks are:
// 1) Z(X) evaluated at the first root of unity is 1
// 2) Z(X) is correctly accumulated. z_(Xg) * g(X) = (1+beta)^n * z(X) * f(X)
// 3) The last element of h_1(x) is equal to the first element of h_2(x)
// 4) Z(x) evaluated at the last root of unity is 1
//
// We can denote check 1 and check 4 as point checks because they are checking the evaluation of Z(x) at a specific point
// We can denote check 3 as an interval check because it checks whether h_1 and h_2 combined form 's' without any gaps. See paper for more details on 's'
// We can denote check 2 as the term check
//
// Notice that the term check equation will determine the degree of the quotient polynomial
// We can compute it by adding the degrees of Z(x), f(x) and t(x).
// deg(Z(x)) = n because it has n + 1 elements
// deg(f(x)) = n although it has n elements, we must zero pad to ensure that f(x) evaluated on the n+1'th element is zero
// deg(t(x)) = n because we define it to have n + 1 elements.
// Summing the degrees gives us n + n + n = 3n
// However, similar to [GWC19](PLONK) we must divide by the vanishing polynomial
// So the degree of the quotient polynomial Q(x) is 3n - n = 2n
// Significance: Adding this protocol into PLONK will not "blow up" the degree of the quotient polynomial
// Where "blow up" denotes increasing the overall degree past 4n for standard plonk
pub fn compute(
    domain: &GeneralEvaluationDomain<Fr>,
    z_poly: &DensePoly<Fr>,
    f_poly: &DensePoly<Fr>,
    t_poly: &DensePoly<Fr>,
    h_1_poly: &DensePoly<Fr>,
    h_2_poly: &DensePoly<Fr>,
    beta: Fr,
    gamma: Fr,
) -> (DensePoly<Fr>, DensePoly<Fr>) {
    // 1. Compute Point check polynomial
    let point_check = compute_point_checks(z_poly, domain);

    //2. Compute interval check polynomial
    let interval_check = compute_interval_check(h_1_poly, h_2_poly, domain);

    //3. Compute term check polynomial
    let term_check = compute_term_check(
        domain, z_poly, f_poly, t_poly, h_1_poly, h_2_poly, beta, gamma,
    );

    // Compute quotient polynomial
    let sum = &(&interval_check + &point_check) + &term_check;

    sum.divide_by_vanishing_poly(*domain).unwrap()
}

fn compute_point_checks(z_poly: &DensePoly<Fr>, domain: &GeneralEvaluationDomain<Fr>) -> DensePoly<Fr> {
    // Compute lagrange polynomials
    let l1_poly = compute_n_lagrange_poly(domain, 0);
    let ln_poly = compute_n_lagrange_poly(domain, domain.size() - 1);

    // Compute Z'(X) = Z(x) - 1
    let z_prime_poly = z_poly - &DensePoly::from_coefficients_vec(vec![Fr::one()]);

    // We can batch the two point checks into one with the following: (Z(X)-1)[L_1(x) + L_n(x)]
    //
    let l_poly = &l1_poly + &ln_poly;

    &z_prime_poly * &l_poly
}

fn compute_interval_check(
    h_1_poly: &DensePoly<Fr>,
    h_2_poly: &DensePoly<Fr>,
    domain: &GeneralEvaluationDomain<Fr>,
) -> DensePoly<Fr> {
    // Increase domain size by two
    let domain_2n = GeneralEvaluationDomain::<Fr>::new(2 * domain.size()).unwrap();

    // Compute last lagrange polynomial in evaluation form
    let ln_evals = compute_n_lagrange_evaluations(domain.size(), domain.size() - 1);
    let ln_2n_evals = domain_2n.fft(&domain.ifft(&ln_evals));

    // Convert h_1 and h_2 to evaluation form
    let h_1_evals = domain_2n.fft(&h_1_poly);
    let mut h_2_evals = domain_2n.fft(&h_2_poly);

    // We need h_2(x * g) so push 2 extra elements into the domain
    h_2_evals.push(h_2_evals[0]);
    h_2_evals.push(h_2_evals[1]);

    // Compute [L_n(x)](h_1(x) - h_2(x * g))
    let i_evals: Vec<_> = (0..domain_2n.size())
        .into_iter()
        .map(|i| {
            let ln_i = ln_2n_evals[i];
            let h_1_i = h_1_evals[i];
            let h_2_i_next = h_2_evals[i + 2];
            ln_i * (h_1_i - h_2_i_next)
        })
        .collect();

    // Convert the evaluations for our point check to coefficient form
    let i_poly = DensePoly::from_coefficients_vec(domain_2n.ifft(&i_evals));
    i_poly
}

pub fn compute_term_check(
    domain: &GeneralEvaluationDomain<Fr>,
    z_poly: &DensePoly<Fr>,
    f_poly: &DensePoly<Fr>,
    t_poly: &DensePoly<Fr>,
    h_1_poly: &DensePoly<Fr>,
    h_2_poly: &DensePoly<Fr>,
    beta: Fr,
    gamma: Fr,
) -> DensePoly<Fr> {
    // The equation for this is quite big. Similar to PLONK, we can split the term check into two.
    // The first part will compute the grand product Z(X) term
    // The second part will compute the grand product Z(Xg) term

    // First Part
    let part_a = compute_term_check_a(domain, z_poly, f_poly, t_poly, beta, gamma);

    // Second part
    let part_b = compute_term_check_b(domain, z_poly, h_1_poly, h_2_poly, beta, gamma);

    &part_a - &part_b
}

// This computes the grand product term for Z(X) or F(\beta, \gamma)
fn compute_term_check_a(
    domain: &GeneralEvaluationDomain<Fr>,
    z_poly: &DensePoly<Fr>,
    f_poly: &DensePoly<Fr>,
    t_poly: &DensePoly<Fr>,
    beta: Fr,
    gamma: Fr,
) -> DensePoly<Fr> {
    // Increase the domain size by 4
    let domain_4n = &GeneralEvaluationDomain::<Fr>::new(4 * domain.size()).unwrap();

    // Convert all polynomials into evaluation form
    let z_evals = domain_4n.fft(&z_poly);
    let f_evals = domain_4n.fft(f_poly);
    let mut t_evals = domain_4n.fft(t_poly);

    // Add four terms to the t(x) evaluations as we need to compute t(Xg)
    t_evals.push(t_evals[0]);
    t_evals.push(t_evals[1]);
    t_evals.push(t_evals[2]);
    t_evals.push(t_evals[3]);

    let beta_one = Fr::one() + beta;

    // Compute the last element in the domain
    let g_n = domain.elements().last().unwrap();

    let i_evals: Vec<_> = (0..domain_4n.size())
        .into_iter()
        .zip(domain_4n.elements())
        .map(|(i, root_i)| {
            let z_i = z_evals[i];
            let f_i = f_evals[i];
            let t_i = t_evals[i];
            let t_i_next = t_evals[i + 4];

            // Compute X - g^n
            let a = root_i - g_n;

            // Compute Z(X)(1+beta)
            let b = z_i * beta_one;

            // Compute gamma + f(X)
            let c = gamma + f_i;

            // Compute gamma(1+beta) + t(x) + beta * t(Xg)
            let d = (gamma * beta_one) + t_i + (beta * t_i_next);

            a * b * c * d
        })
        .collect();

    // Convert the evaluations for our term check to coefficient form
    let i_poly = DensePoly::from_coefficients_vec(domain_4n.ifft(&i_evals));

    assert_eq!(
        i_poly.evaluate(&domain.elements().last().unwrap()),
        Fr::zero()
    );

    i_poly
}

// This computes the grand product term for Z(Xg) or G(\beta, \gamma)
fn compute_term_check_b(
    domain: &GeneralEvaluationDomain<Fr>,
    z_poly: &DensePoly<Fr>,
    h_1_poly: &DensePoly<Fr>,
    h_2_poly: &DensePoly<Fr>,
    beta: Fr,
    gamma: Fr,
) -> DensePoly<Fr> {
    // Increase the domain size by 4
    let domain_4n = &GeneralEvaluationDomain::<Fr>::new(4 * domain.size()).unwrap();

    // Convert all polynomials into evaluation form, then add four terms to each
    // evaluation as we need to compute their evaluations at the next root of unity
    let mut z_evals = domain_4n.fft(z_poly);
    z_evals.push(z_evals[0]);
    z_evals.push(z_evals[1]);
    z_evals.push(z_evals[2]);
    z_evals.push(z_evals[3]);

    let mut h_1_evals = domain_4n.fft(h_1_poly);
    h_1_evals.push(h_1_evals[0]);
    h_1_evals.push(h_1_evals[1]);
    h_1_evals.push(h_1_evals[2]);
    h_1_evals.push(h_1_evals[3]);

    let mut h_2_evals = domain_4n.fft(h_2_poly);
    h_2_evals.push(h_2_evals[0]);
    h_2_evals.push(h_2_evals[1]);
    h_2_evals.push(h_2_evals[2]);
    h_2_evals.push(h_2_evals[3]);

    // Compute (1 + beta)
    let beta_one = Fr::one() + beta;

    // Compute the last element in the domain
    let g_n = domain.elements().last().unwrap();

    let i_evals: Vec<_> = (0..domain_4n.size())
        .into_iter()
        .zip(domain_4n.elements())
        .map(|(i, root_i)| {
            let z_i_next = z_evals[i + 4];
            let h_1_i = h_1_evals[i];
            let h_1_i_next = h_1_evals[i + 4];
            let h_2_i = h_2_evals[i];
            let h_2_i_next = h_2_evals[i + 4];

            // Compute (X - g^n) Z(Xg)
            let a = (root_i - g_n) * z_i_next;

            // Compute [gamma * (1+beta)] + h_1(x) + beta * h_1(Xg)
            let b = (gamma * beta_one) + h_1_i + (beta * h_1_i_next);

            // Compute [gamma * (1+beta)] + h_2(x) + beta * h_2(Xg)
            let c = (gamma * beta_one) + h_2_i + (beta * h_2_i_next);

            a * b * c
        })
        .collect();

    // Convert the evaluations for our term check to coefficient form
    let i_poly = DensePoly::from_coefficients_vec(domain_4n.ifft(&i_evals));

    assert_eq!(
        i_poly.evaluate(&domain.elements().last().unwrap()),
        Fr::zero()
    );

    i_poly
}

// Computes the n'th lagrange poly for a particular domain
// Easiest way is to compute the evaluation points, which will be zero at every position except for n
// Then IFFT to get the coefficient form
// Note: n=0 is the first lagrange polynomial and n = domain.size() -1 is the last lagrange polynomial
pub fn compute_n_lagrange_poly(domain: &GeneralEvaluationDomain<Fr>, n: usize) -> DensePoly<Fr> {
    assert!(n <= domain.size() - 1);
    let mut evaluations = compute_n_lagrange_evaluations(domain.size(), n);
    domain.ifft_in_place(&mut evaluations);
    DensePoly::from_coefficients_vec(evaluations)
}

fn compute_n_lagrange_evaluations(domain_size: usize, n: usize) -> Vec<Fr> {
    let mut lagrange_evaluations = vec![Fr::zero(); domain_size];
    lagrange_evaluations[n] = Fr::one();
    lagrange_evaluations
}

#[cfg(test)]
mod test {
    use super::*;
    use crate::multiset::{multiset_equality::*, MultiSet};
    use ark_poly::UVPolynomial;
    use ark_poly::univariate::DensePolynomial as DensePoly;

    #[test]
    fn test_quotient_poly() {
        // Compute f
        let mut f = MultiSet::new();
        f.push(Fr::from(2u8));
        f.push(Fr::from(3u8));
        f.push(Fr::from(4u8));

        // Compute t
        let mut t = MultiSet::new();
        t.push(Fr::from(2u8));
        t.push(Fr::from(3u8));
        t.push(Fr::from(4u8));
        t.push(Fr::from(5u8));

        // Setup domain
        let domain = EvaluationDomain::new(f.len()).unwrap();

        let beta = Fr::from(10u8);
        let gamma = Fr::from(11u8);

        // Compute h_1 and h_2
        let (h_1, h_2) = compute_h1_h2(&f, &t);

        // Convert h_1 and h_2 to polynomials
        let h_1_poly = h_1.to_polynomial(&domain);
        let h_2_poly = h_2.to_polynomial(&domain);

        // Compute f(x)
        let f_poly = f.to_polynomial(&domain);
        assert_eq!(f_poly.degree(), f.len());

        // Compute t(x)
        let t_poly = t.to_polynomial(&domain);
        assert_eq!(t_poly.degree(), t.len() - 1);

        // Compute Z(x) poly
        let z_evaluations = compute_accumulator_values(&f, &t, &h_1, &h_2, beta, gamma);
        let z_poly = DensePoly::from_coefficients_vec(domain.ifft(&z_evaluations));

        let (_, remainder) = compute(
            &domain, &z_poly, &f_poly, &t_poly, &h_1_poly, &h_2_poly, beta, gamma,
        );

        assert!(remainder.is_zero());
    }
}
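// Illustrative check (an addition, not part of the original file): the n'th
// lagrange polynomial built by `compute_n_lagrange_poly` should act as an
// indicator over the domain, evaluating to 1 at the n'th root of unity and
// to 0 at every other root. This sketch only uses the helpers and imports
// already present above.
#[cfg(test)]
mod lagrange_sketch {
    use super::*;

    #[test]
    fn nth_lagrange_poly_is_an_indicator() {
        let domain = GeneralEvaluationDomain::<Fr>::new(4).unwrap();
        let l2 = compute_n_lagrange_poly(&domain, 2);
        for (i, root) in domain.elements().enumerate() {
            let expected = if i == 2 { Fr::one() } else { Fr::zero() };
            assert_eq!(l2.evaluate(&root), expected);
        }
    }
}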
36.273585
157
0.633117
2fb4314cda2e2e4a94c5478244d559fcf5ab2c48
5,274
use futures::{try_ready, Async, Future, Poll, Stream};
use linkerd2_error::{Error, Never};
use std::fmt;
use std::time::{Duration, Instant};
use tokio::sync::{mpsc, oneshot};
use tokio::timer::Delay;
use tower::discover;
use tracing_futures::Instrument;

#[derive(Clone, Debug)]
pub struct Buffer<M> {
    capacity: usize,
    watchdog_timeout: Duration,
    inner: M,
}

#[derive(Debug)]
pub struct Discover<K, S> {
    rx: mpsc::Receiver<discover::Change<K, S>>,
    _disconnect_tx: oneshot::Sender<Never>,
}

pub struct DiscoverFuture<F, D> {
    future: F,
    capacity: usize,
    watchdog_timeout: Duration,
    _marker: std::marker::PhantomData<fn() -> D>,
}

pub struct Daemon<D: discover::Discover> {
    discover: D,
    disconnect_rx: oneshot::Receiver<Never>,
    tx: mpsc::Sender<discover::Change<D::Key, D::Service>>,
    watchdog: Option<Delay>,
    watchdog_timeout: Duration,
}

#[derive(Clone, Debug)]
pub struct Lost(());

impl<M> Buffer<M> {
    pub fn new<T>(capacity: usize, watchdog_timeout: Duration, inner: M) -> Self
    where
        Self: tower::Service<T>,
    {
        Self {
            capacity,
            watchdog_timeout,
            inner,
        }
    }
}

impl<T, M, D> tower::Service<T> for Buffer<M>
where
    T: fmt::Display,
    M: tower::Service<T, Response = D>,
    D: discover::Discover + Send + 'static,
    D::Error: Into<Error>,
    D::Key: Send,
    D::Service: Send,
{
    type Response = Discover<D::Key, D::Service>;
    type Error = M::Error;
    type Future = DiscoverFuture<M::Future, M::Response>;

    fn poll_ready(&mut self) -> Poll<(), Self::Error> {
        self.inner.poll_ready()
    }

    fn call(&mut self, req: T) -> Self::Future {
        let future = self.inner.call(req);
        Self::Future {
            future,
            capacity: self.capacity,
            watchdog_timeout: self.watchdog_timeout,
            _marker: std::marker::PhantomData,
        }
    }
}

impl<F, D> Future for DiscoverFuture<F, D>
where
    F: Future<Item = D>,
    D: discover::Discover + Send + 'static,
    D::Error: Into<Error>,
    D::Key: Send,
    D::Service: Send,
{
    type Item = Discover<D::Key, D::Service>;
    type Error = F::Error;

    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
        let discover = try_ready!(self.future.poll());

        let (tx, rx) = mpsc::channel(self.capacity);
        let (_disconnect_tx, disconnect_rx) = oneshot::channel();
        let fut = Daemon {
            discover,
            disconnect_rx,
            tx,
            watchdog_timeout: self.watchdog_timeout,
            watchdog: None,
        };
        tokio::spawn(fut.in_current_span());

        Ok(Discover { rx, _disconnect_tx }.into())
    }
}

impl<D> Future for Daemon<D>
where
    D: discover::Discover,
    D::Error: Into<Error>,
{
    type Item = ();
    type Error = ();

    fn poll(&mut self) -> Poll<(), ()> {
        loop {
            match self.disconnect_rx.poll() {
                Ok(Async::NotReady) => {}
                Err(_lost) => return Ok(().into()),
                Ok(Async::Ready(n)) => match n {},
            }

            // The watchdog bounds the amount of time that the send buffer stays
            // full. This is designed to release the `discover` resources, i.e.
            // if we expect that the receiver has leaked.
            match self.tx.poll_ready() {
                Ok(Async::Ready(())) => {
                    self.watchdog = None;
                }
                Err(_) => {
                    tracing::trace!("lost sender");
                    return Err(());
                }
                Ok(Async::NotReady) => {
                    let mut watchdog = self
                        .watchdog
                        .take()
                        .unwrap_or_else(|| Delay::new(Instant::now() + self.watchdog_timeout));
                    if watchdog.poll().expect("timer must not fail").is_ready() {
                        tracing::warn!(
                            timeout = ?self.watchdog_timeout,
                            "dropping resolution due to watchdog",
                        );
                        return Err(());
                    }
                    self.watchdog = Some(watchdog);
                    return Ok(Async::NotReady);
                }
            }

            let up = try_ready!(self.discover.poll().map_err(|e| {
                let e: Error = e.into();
                tracing::debug!("resolution lost: {}", e);
            }));

            self.tx.try_send(up).ok().expect("sender must be ready");
        }
    }
}

impl<K: std::hash::Hash + Eq, S> tower::discover::Discover for Discover<K, S> {
    type Key = K;
    type Service = S;
    type Error = Error;

    fn poll(&mut self) -> Poll<tower::discover::Change<K, S>, Self::Error> {
        return match self.rx.poll() {
            Ok(Async::NotReady) => Ok(Async::NotReady),
            Ok(Async::Ready(Some(change))) => Ok(Async::Ready(change)),
            Err(_) | Ok(Async::Ready(None)) => Err(Lost(()).into()),
        };
    }
}

impl std::fmt::Display for Lost {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "discovery task failed")
    }
}

impl std::error::Error for Lost {}
28.354839
95
0.523132
750c0cb765ee6545d4fadaa1397a6399824907c7
5,702
// Copyright 2020 Shift Crypto AG
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use core::convert::TryInto;
use core::fmt::Write;
use util::c_types::size_t;

use bitbox02_rust::apps::bitcoin::util::format_amount;
use bitbox02_rust::apps::bitcoin::{bip143, keypath};

/// # Safety
/// `keypath` must not be NULL and must contain `keypath_len` u32 elements.
#[no_mangle]
pub unsafe extern "C" fn rust_bitcoin_keypath_validate_account(
    keypath: *const u32,
    keypath_len: size_t,
    expected_purpose: u32,
    expected_coin: u32,
) -> bool {
    let keypath = core::slice::from_raw_parts(keypath, keypath_len);
    bitbox02_rust::apps::bitcoin::keypath::validate_account(
        keypath,
        expected_purpose,
        expected_coin,
    )
    .is_ok()
}

/// # Safety
/// `keypath` must not be NULL and must contain `keypath_len` u32 elements.
#[no_mangle]
pub unsafe extern "C" fn rust_bitcoin_keypath_validate_account_multisig(
    keypath: *const u32,
    keypath_len: size_t,
    expected_coin: u32,
    script_type: i32,
) -> bool {
    let script_type = match keypath::MultisigScriptType::from_i32(script_type) {
        Some(script_type) => script_type,
        None => return false,
    };
    let keypath = core::slice::from_raw_parts(keypath, keypath_len);
    keypath::validate_account_multisig(keypath, expected_coin, script_type).is_ok()
}

/// # Safety
/// `keypath` must not be NULL and must contain `keypath_len` u32 elements.
#[no_mangle]
pub unsafe extern "C" fn rust_bitcoin_keypath_validate_address_multisig(
    keypath: *const u32,
    keypath_len: size_t,
    expected_coin: u32,
    script_type: i32,
) -> bool {
    let script_type = match keypath::MultisigScriptType::from_i32(script_type) {
        Some(script_type) => script_type,
        None => return false,
    };
    let keypath = core::slice::from_raw_parts(keypath, keypath_len);
    keypath::validate_address_multisig(keypath, expected_coin, script_type).is_ok()
}

/// # Safety
/// `keypath` must not be NULL and must contain `keypath_len` u32 elements.
#[no_mangle]
pub unsafe extern "C" fn rust_bitcoin_keypath_validate_account_simple(
    keypath: *const u32,
    keypath_len: size_t,
    expected_coin: u32,
    script_type: i32,
) -> bool {
    let script_type = match keypath::SimpleType::from_i32(script_type) {
        Some(script_type) => script_type,
        None => return false,
    };
    let keypath = core::slice::from_raw_parts(keypath, keypath_len);
    keypath::validate_account_simple(keypath, expected_coin, script_type).is_ok()
}

/// # Safety
/// `keypath` must not be NULL and must contain `keypath_len` u32 elements.
#[no_mangle]
pub unsafe extern "C" fn rust_bitcoin_keypath_validate_xpub(
    keypath: *const u32,
    keypath_len: size_t,
    expected_coin: u32,
) -> bool {
    let keypath = core::slice::from_raw_parts(keypath, keypath_len);
    keypath::validate_xpub(keypath, expected_coin).is_ok()
}

/// # Safety
/// `keypath` must not be NULL and must contain `keypath_len` u32 elements.
#[no_mangle]
pub unsafe extern "C" fn rust_bitcoin_keypath_validate_address_simple(
    keypath: *const u32,
    keypath_len: size_t,
    expected_coin: u32,
    script_type: i32,
) -> bool {
    let script_type = match keypath::SimpleType::from_i32(script_type) {
        Some(script_type) => script_type,
        None => return false,
    };
    let keypath = core::slice::from_raw_parts(keypath, keypath_len);
    keypath::validate_address_simple(keypath, expected_coin, script_type).is_ok()
}

/// `out` should be at least 31+len(unit) bytes.
#[no_mangle]
pub extern "C" fn rust_bitcoin_util_format_amount(
    satoshi: u64,
    unit: crate::util::CStr,
    mut out: crate::util::CStrMut,
) {
    let result = format_amount(satoshi, unit.as_ref());
    out.write_str(&result).unwrap();
}

#[repr(C)]
pub struct Bip143Args {
    version: u32,
    hash_prevouts: *const u8,
    hash_sequence: *const u8,
    outpoint_hash: *const u8,
    outpoint_index: u32,
    sighash_script: crate::util::Bytes,
    prevout_value: u64,
    sequence: u32,
    hash_outputs: *const u8,
    locktime: u32,
    sighash_flags: u32,
}

/// # Safety
/// The *const u8 buffers must be valid 32 byte buffers.
#[no_mangle]
pub unsafe extern "C" fn rust_bitcoin_bip143_sighash(
    args: &Bip143Args,
    mut hash_out: crate::util::BytesMut,
) {
    let hash = bip143::sighash(&bip143::Args {
        version: args.version,
        hash_prevouts: core::slice::from_raw_parts(args.hash_prevouts, 32)
            .try_into()
            .unwrap(),
        hash_sequence: core::slice::from_raw_parts(args.hash_sequence, 32)
            .try_into()
            .unwrap(),
        outpoint_hash: core::slice::from_raw_parts(args.outpoint_hash, 32)
            .try_into()
            .unwrap(),
        outpoint_index: args.outpoint_index,
        sighash_script: args.sighash_script.as_ref(),
        prevout_value: args.prevout_value,
        sequence: args.sequence,
        hash_outputs: core::slice::from_raw_parts(args.hash_outputs, 32)
            .try_into()
            .unwrap(),
        locktime: args.locktime,
        sighash_flags: args.sighash_flags,
    });
    hash_out.as_mut().copy_from_slice(&hash[..]);
}
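// Back-of-the-envelope check (an illustrative addition, not part of the
// original file, and assuming `std`/`alloc` formatting is available in a test
// build) for the `31 + len(unit)` buffer bound documented above: the largest
// u64 satoshi value formats to "184467440737.09551615", i.e. at most 21
// characters, so 31 bytes plus the unit length leaves room for the amount, a
// separating space, the unit itself, and a trailing NUL on the C side.
#[cfg(test)]
mod format_amount_bound_sketch {
    #[test]
    fn documented_bound_covers_the_worst_case() {
        // Worst-case amount: u64::MAX satoshi rendered as BTC with 8 decimals.
        let worst = format!(
            "{}.{:08} {}",
            u64::MAX / 100_000_000,
            u64::MAX % 100_000_000,
            "BTC"
        );
        assert!(worst.len() <= 31 + "BTC".len());
    }
}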
32.214689
83
0.68625
644a34e6d1a6b36960357a74d347f9c5c90a9a90
4,474
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0

use mirai_annotations::debug_checked_precondition;
use serde::{Serialize, Serializer};
use std::{fmt, str};
use thiserror::Error;

/// An efficient container for formatting a byte slice as a hex-formatted string,
/// stored on the stack.
///
/// Using `ShortHexStr` instead of `hex::encode` is about 3-4x faster on a recent
/// MBP 2019 (~48 ns/iter vs ~170 ns/iter) in an artificial micro benchmark.
#[derive(Ord, PartialOrd, Eq, PartialEq, Hash, Clone, Copy)]
pub struct ShortHexStr([u8; ShortHexStr::LENGTH]);

#[derive(Error, Debug)]
#[error("Input bytes are too short")]
pub struct InputTooShortError;

impl ShortHexStr {
    pub const SOURCE_LENGTH: usize = 4;
    pub const LENGTH: usize = 2 * ShortHexStr::SOURCE_LENGTH;

    /// Format a new `ShortHexStr` from a byte slice.
    ///
    /// Returns `Err(InputTooShortError)` if the input byte slice length is less
    /// than `SOURCE_LENGTH` bytes.
    pub fn try_from_bytes(src_bytes: &[u8]) -> Result<ShortHexStr, InputTooShortError> {
        if src_bytes.len() >= ShortHexStr::SOURCE_LENGTH {
            let src_short_bytes = &src_bytes[0..ShortHexStr::SOURCE_LENGTH];
            let mut dest_bytes = [0u8; ShortHexStr::LENGTH];

            // We include a tiny hex encode here instead of using the `hex` crate's
            // `encode_to_slice`, since the compiler seems unable to inline across
            // the crate boundary.
            hex_encode(&src_short_bytes, &mut dest_bytes);
            Ok(Self(dest_bytes))
        } else {
            Err(InputTooShortError)
        }
    }

    pub fn as_str(&self) -> &str {
        // We could also do str::from_utf8_unchecked here to avoid the unnecessary
        // runtime check. Shaves ~6-7 ns/iter in a micro bench but the unsafe is
        // probably not worth the hassle.
        str::from_utf8(&self.0).expect(
            "This can never fail since &self.0 will only ever contain the \
             following characters: '0123456789abcdef', which are all valid \
             ASCII characters and therefore all valid UTF-8",
        )
    }
}

impl fmt::Debug for ShortHexStr {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(self.as_str())
    }
}

impl fmt::Display for ShortHexStr {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(self.as_str())
    }
}

impl Serialize for ShortHexStr {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        serializer.serialize_str(self.as_str())
    }
}

/// Maps a nibble to its corresponding hex-formatted ASCII character.
const HEX_CHARS_LOWER: &[u8; 16] = b"0123456789abcdef";

/// Format a byte as hex. Returns a tuple containing the first character and then
/// the second character as ASCII bytes.
#[inline(always)]
fn byte2hex(byte: u8) -> (u8, u8) {
    let hi = HEX_CHARS_LOWER[((byte >> 4) & 0x0f) as usize];
    let lo = HEX_CHARS_LOWER[(byte & 0x0f) as usize];
    (hi, lo)
}

/// Hex encode a byte slice into the destination byte slice.
#[inline(always)]
fn hex_encode(src: &[u8], dst: &mut [u8]) {
    debug_checked_precondition!(dst.len() == 2 * src.len());

    for (byte, out) in src.iter().zip(dst.chunks_mut(2)) {
        let (hi, lo) = byte2hex(*byte);
        out[0] = hi;
        out[1] = lo;
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use proptest::prelude::*;
    use std::{str, u8};

    #[test]
    fn test_hex_encode() {
        let src = [0x12_u8, 0x34, 0xfe, 0xba];
        let mut actual = [0u8; 8];
        hex_encode(&src, &mut actual);
        let expected = b"1234feba";
        assert_eq!(&actual, expected);
    }

    #[test]
    fn test_byte2hex_equivalence() {
        for byte in 0..=u8::MAX {
            let (hi, lo) = byte2hex(byte);
            let formatted_bytes = [hi, lo];
            let actual = str::from_utf8(&formatted_bytes[..]).unwrap();
            let expected = hex::encode(&[byte][..]);
            assert_eq!(actual, expected.as_str());
        }
    }

    proptest! {
        #[test]
        fn test_address_short_str_equivalence(addr in any::<[u8; 16]>()) {
            let short_str_old = hex::encode(&addr[0..ShortHexStr::SOURCE_LENGTH]);
            let short_str_new = ShortHexStr::try_from_bytes(&addr).unwrap();
            prop_assert_eq!(short_str_old.as_str(), short_str_new.as_str());
        }
    }
}
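// Illustrative check (an addition, not part of the original file): inputs
// shorter than SOURCE_LENGTH are rejected with InputTooShortError rather than
// being zero-padded, while longer inputs are truncated to the first
// SOURCE_LENGTH bytes before encoding.
#[cfg(test)]
mod boundary_sketch {
    use super::*;

    #[test]
    fn short_inputs_are_rejected() {
        assert!(ShortHexStr::try_from_bytes(&[0xab, 0xcd]).is_err());
    }

    #[test]
    fn longer_inputs_are_truncated() {
        let s = ShortHexStr::try_from_bytes(&[0x12, 0x34, 0xfe, 0xba, 0xff]).unwrap();
        assert_eq!(s.as_str(), "1234feba");
    }
}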
32.897059
88
0.618239
1857e4385db4925d054e874323feffb090212b2f
27,808
//! Commonly used as the type of extractor or response.

mod addr;
#[cfg(feature = "compression")]
mod compress;
#[cfg(feature = "cookie")]
#[cfg_attr(docsrs, doc(cfg(feature = "cookie")))]
pub mod cookie;
mod data;
mod form;
mod json;
#[cfg(feature = "multipart")]
mod multipart;
mod path;
mod query;
mod redirect;
#[cfg(feature = "sse")]
#[cfg_attr(docsrs, doc(cfg(feature = "sse")))]
pub mod sse;
#[cfg(feature = "static-files")]
mod static_file;
#[cfg(feature = "tempfile")]
mod tempfile;

#[doc(inline)]
pub use headers;

#[cfg(feature = "csrf")]
mod csrf;
mod typed_header;
#[cfg(feature = "websocket")]
#[cfg_attr(docsrs, doc(cfg(feature = "websocket")))]
pub mod websocket;

use std::{convert::Infallible, fmt::Debug};

pub use addr::{LocalAddr, RemoteAddr};
use bytes::Bytes;
#[cfg(feature = "compression")]
pub use compress::{Compress, CompressionAlgo};
#[cfg(feature = "csrf")]
pub use csrf::{CsrfToken, CsrfVerifier};
pub use data::Data;
pub use form::Form;
pub use json::Json;
#[cfg(feature = "multipart")]
pub use multipart::{Field, Multipart};
pub use path::Path;
pub(crate) use path::PathDeserializer;
pub use query::Query;
pub use redirect::Redirect;
#[cfg(feature = "static-files")]
pub use static_file::{StaticFileRequest, StaticFileResponse};
pub use typed_header::TypedHeader;

#[cfg(feature = "tempfile")]
pub use self::tempfile::TempFile;
use crate::{
    body::Body,
    error::{ReadBodyError, Result},
    http::{
        header::{HeaderMap, HeaderName},
        HeaderValue, Method, StatusCode, Uri, Version,
    },
    request::Request,
    response::Response,
};

/// The body parameter type of [`FromRequest::from_request`] method.
#[derive(Default)]
pub struct RequestBody(Option<Body>);

impl RequestBody {
    /// Create a new request body.
    pub fn new(body: Body) -> Self {
        Self(Some(body))
    }

    /// Take a body, if it has already been taken, an error with the status code
    /// [`StatusCode::INTERNAL_SERVER_ERROR`] is returned.
    pub fn take(&mut self) -> Result<Body, ReadBodyError> {
        self.0.take().ok_or(ReadBodyError::BodyHasBeenTaken)
    }

    /// Returns `true` if body exists.
    #[inline]
    pub fn is_some(&self) -> bool {
        self.0.is_some()
    }

    /// Returns `true` if body does not exist.
    #[inline]
    pub fn is_none(&self) -> bool {
        self.0.is_none()
    }
}

/// Represents a type that can be extracted from requests.
///
/// # Provided Implementations
///
/// - **Option&lt;T>**
///
///   Extracts `T` from the incoming request, returns [`None`] if it
///   fails.
///
/// - **&Request**
///
///   Extracts the [`Request`] from the incoming request.
///
/// - **&RemoteAddr**
///
///   Extracts the remote peer's address [`RemoteAddr`] from request.
///
/// - **&LocalAddr**
///
///   Extracts the local server's address [`LocalAddr`] from request.
///
/// - **Method**
///
///   Extracts the [`Method`] from the incoming request.
///
/// - **Version**
///
///   Extracts the [`Version`] from the incoming request.
///
/// - **&Uri**
///
///   Extracts the [`Uri`] from the incoming request.
///
/// - **&HeaderMap**
///
///   Extracts the [`HeaderMap`] from the incoming request.
///
/// - **Data&lt;&T>**
///
///   Extracts the [`Data`] from the incoming request.
///
/// - **TypedHeader&lt;T>**
///
///   Extracts the [`TypedHeader`] from the incoming request.
///
/// - **Path&lt;T>**
///
///   Extracts the [`Path`] from the incoming request.
///
/// - **Query&lt;T>**
///
///   Extracts the [`Query`] from the incoming request.
///
/// - **Form&lt;T>**
///
///   Extracts the [`Form`] from the incoming request.
///
/// - **Json&lt;T>**
///
///   Extracts the [`Json`] from the incoming request.
///
///   _This extractor will take over the requested body, so you should avoid
///   using multiple extractors of this type in one handler._
///
/// - **TempFile**
///
///   Extracts the [`TempFile`] from the incoming request.
///
///   _This extractor will take over the requested body, so you should avoid
///   using multiple extractors of this type in one handler._
///
/// - **Multipart**
///
///   Extracts the [`Multipart`] from the incoming request.
///
///   _This extractor will take over the requested body, so you should avoid
///   using multiple extractors of this type in one handler._
///
/// - **&CookieJar**
///
///   Extracts the [`CookieJar`](cookie::CookieJar) from the incoming request.
///
///   _Requires `CookieJarManager` middleware._
///
/// - **&Session**
///
///   Extracts the [`Session`](crate::session::Session) from the incoming
///   request.
///
///   _Requires `CookieSession` or `RedisSession` middleware._
///
/// - **Body**
///
///   Extracts the [`Body`] from the incoming request.
///
///   _This extractor will take over the requested body, so you should avoid
///   using multiple extractors of this type in one handler._
///
/// - **String**
///
///   Extracts the body from the incoming request and parse it into utf8
///   [`String`].
///
///   _This extractor will take over the requested body, so you should avoid
///   using multiple extractors of this type in one handler._
///
/// - **Vec&lt;u8>**
///
///   Extracts the body from the incoming request and collect it into
///   [`Vec<u8>`].
///
///   _This extractor will take over the requested body, so you should avoid
///   using multiple extractors of this type in one handler._
///
/// - **Bytes**
///
///   Extracts the body from the incoming request and collect it into
///   [`Bytes`].
///
///   _This extractor will take over the requested body, so you should avoid
///   using multiple extractors of this type in one handler._
///
/// - **WebSocket**
///
///   Ready to accept a websocket [`WebSocket`](websocket::WebSocket)
///   connection.
///
/// - **Locale**
///
///   Extracts the [`Locale`](crate::i18n::Locale) from the incoming
///   request.
///
/// - **StaticFileRequest**
///
///   Ready to accept a static file request
///   [`StaticFileRequest`](static_file::StaticFileRequest).
///
/// # Create your own extractor
///
/// The following is an example of a custom token extractor, which extracts the
/// token from the `MyToken` header.
///
/// ```
/// use std::fmt::{self, Display, Formatter};
///
/// use poem::{
///     get, handler, http::StatusCode, Endpoint, Error, FromRequest, Request, RequestBody, Result,
///     Route,
/// };
///
/// struct Token(String);
///
/// #[poem::async_trait]
/// impl<'a> FromRequest<'a> for Token {
///     async fn from_request(req: &'a Request, body: &mut RequestBody) -> Result<Self> {
///         let token = req
///             .headers()
///             .get("MyToken")
///             .and_then(|value| value.to_str().ok())
///             .ok_or_else(|| Error::from_string("missing token", StatusCode::BAD_REQUEST))?;
///         Ok(Token(token.to_string()))
///     }
/// }
///
/// #[handler]
/// async fn index(token: Token) {
///     assert_eq!(token.0, "token123");
/// }
///
/// let app = Route::new().at("/", get(index));
/// # tokio::runtime::Runtime::new().unwrap().block_on(async {
/// let _ = index
///     .call(Request::builder().header("MyToken", "token123").finish())
///     .await;
/// # });
/// ```
#[async_trait::async_trait]
pub trait FromRequest<'a>: Sized {
    /// Extract from request head and body.
    async fn from_request(req: &'a Request, body: &mut RequestBody) -> Result<Self>;

    /// Extract from request head.
    ///
    /// If you know that this type does not need to extract the body, then you
    /// can just use it.
    ///
    /// For example, [`Query`] and [`Path`] only extract content from the
    /// request head, so using this method for them is more convenient.
    /// `String` and `Vec<u8>` extract the body of the request, so using this
    /// method for them will cause a `ReadBodyError` error.
    async fn from_request_without_body(req: &'a Request) -> Result<Self> {
        Self::from_request(req, &mut Default::default()).await
    }
}

/// Represents a type that can be converted into a response.
///
/// # Provided Implementations
///
/// - **()**
///
///   Sets the status to `OK` with an empty body.
///
/// - **&'static str**
///
///   Sets the status to `OK` and the `Content-Type` to `text/plain`. The
///   string is used as the body of the response.
///
/// - **String**
///
///   Sets the status to `OK` and the `Content-Type` to `text/plain`. The
///   string is used as the body of the response.
///
/// - **&'static [u8]**
///
///   Sets the status to `OK` and the `Content-Type` to
///   `application/octet-stream`. The slice is used as the body of the response.
///
/// - **Html&lt;T>**
///
///   Sets the status to `OK` and the `Content-Type` to `text/html`. `T` is
///   used as the body of the response.
///
/// - **Json&lt;T>**
///
///   Sets the status to `OK` and the `Content-Type` to `application/json`. Use
///   [`serde_json`](https://crates.io/crates/serde_json) to serialize `T` into a json string.
///
/// - **Bytes**
///
///   Sets the status to `OK` and the `Content-Type` to
///   `application/octet-stream`. The bytes is used as the body of the response.
///
/// - **Vec&lt;u8>**
///
///   Sets the status to `OK` and the `Content-Type` to
///   `application/octet-stream`. The vector’s data is used as the body of the
///   response.
///
/// - **Body**
///
///   Sets the status to `OK` and use the specified body.
///
/// - **StatusCode**
///
///   Sets the status to the specified status code [`StatusCode`] with an empty
///   body.
///
/// - **(StatusCode, T)**
///
///   Convert `T` to response and set the specified status code [`StatusCode`].
///
/// - **(StatusCode, HeaderMap, T)**
///
///   Convert `T` to response and set the specified status code [`StatusCode`],
///   and then merge the specified [`HeaderMap`].
///
/// - **Response**
///
///   The implementation for [`Response`] always returns itself.
///
/// - **Compress&lt;T>**
///
///   Call `T::into_response` to get the response, then compress the response
///   body with the specified algorithm, and set the correct `Content-Encoding`
///   header.
///
/// - **SSE**
///
///   Sets the status to `OK` and the `Content-Type` to `text/event-stream`
///   with an event stream body. Use the [`SSE::new`](sse::SSE::new) function to
///   create it.
///
/// # Create your own response
///
/// ```
/// use poem::{handler, http::Uri, web::Query, Endpoint, IntoResponse, Request, Response};
/// use serde::Deserialize;
///
/// struct Hello(Option<String>);
///
/// impl IntoResponse for Hello {
///     fn into_response(self) -> Response {
///         let msg = match self.0 {
///             Some(name) => format!("hello {}", name),
///             None => format!("hello"),
///         };
///         msg.into_response()
///     }
/// }
///
/// #[derive(Deserialize)]
/// struct Params {
///     name: Option<String>,
/// }
///
/// #[handler]
/// async fn index(params: Query<Params>) -> impl IntoResponse {
///     Hello(params.0.name)
/// }
///
/// # tokio::runtime::Runtime::new().unwrap().block_on(async {
/// assert_eq!(
///     index
///         .call(
///             Request::builder()
///                 .uri(Uri::from_static("/?name=sunli"))
///                 .finish()
///         )
///         .await
///         .unwrap()
///         .take_body()
///         .into_string()
///         .await
///         .unwrap(),
///     "hello sunli"
/// );
///
/// assert_eq!(
///     index
///         .call(Request::builder().uri(Uri::from_static("/")).finish())
///         .await
///         .unwrap()
///         .take_body()
///         .into_string()
///         .await
///         .unwrap(),
///     "hello"
/// );
/// # });
/// ```
pub trait IntoResponse: Send {
    /// Consume itself and return [`Response`].
    fn into_response(self) -> Response;

    /// Wrap an `impl IntoResponse` to add a header.
    ///
    /// # Example
    ///
    /// ```
    /// use poem::{http::HeaderValue, IntoResponse};
    ///
    /// # tokio::runtime::Runtime::new().unwrap().block_on(async {
    /// let resp = "hello".with_header("foo", "bar").into_response();
    /// assert_eq!(
    ///     resp.headers().get("foo"),
    ///     Some(&HeaderValue::from_static("bar"))
    /// );
    /// assert_eq!(resp.into_body().into_string().await.unwrap(), "hello");
    /// # });
    /// ```
    fn with_header<K, V>(self, key: K, value: V) -> WithHeader<Self>
    where
        K: TryInto<HeaderName>,
        V: TryInto<HeaderValue>,
        Self: Sized,
    {
        let key = key.try_into().ok();
        let value = value.try_into().ok();

        WithHeader {
            inner: self,
            header: key.zip(value),
        }
    }

    /// Wrap an `impl IntoResponse` to set a status code.
    ///
    /// # Example
    ///
    /// ```
    /// use poem::{http::StatusCode, IntoResponse};
    ///
    /// # tokio::runtime::Runtime::new().unwrap().block_on(async {
    /// let resp = "hello".with_status(StatusCode::CONFLICT).into_response();
    /// assert_eq!(resp.status(), StatusCode::CONFLICT);
    /// assert_eq!(resp.into_body().into_string().await.unwrap(), "hello");
    /// # });
    /// ```
    fn with_status(self, status: StatusCode) -> WithStatus<Self>
    where
        Self: Sized,
    {
        WithStatus {
            inner: self,
            status,
        }
    }

    /// Wrap an `impl IntoResponse` to set a body.
    ///
    ///
    /// # Example
    ///
    /// ```
    /// use poem::{http::StatusCode, IntoResponse};
    ///
    /// # tokio::runtime::Runtime::new().unwrap().block_on(async {
    /// let resp = StatusCode::CONFLICT.with_body("hello").into_response();
    /// assert_eq!(resp.status(), StatusCode::CONFLICT);
    /// assert_eq!(resp.into_body().into_string().await.unwrap(), "hello");
    /// # });
    /// ```
    fn with_body(self, body: impl Into<Body>) -> WithBody<Self>
    where
        Self: Sized,
    {
        WithBody {
            inner: self,
            body: body.into(),
        }
    }
}

impl IntoResponse for Infallible {
    fn into_response(self) -> Response {
        unreachable!()
    }
}

/// Returned by [`with_header`](IntoResponse::with_header) method.
pub struct WithHeader<T> {
    inner: T,
    header: Option<(HeaderName, HeaderValue)>,
}

impl<T: IntoResponse> IntoResponse for WithHeader<T> {
    fn into_response(self) -> Response {
        let mut resp = self.inner.into_response();
        if let Some((key, value)) = &self.header {
            resp.headers_mut().append(key.clone(), value.clone());
        }
        resp
    }
}

/// Returned by [`with_status`](IntoResponse::with_status) method.
pub struct WithStatus<T> {
    inner: T,
    status: StatusCode,
}

impl<T: IntoResponse> IntoResponse for WithStatus<T> {
    fn into_response(self) -> Response {
        let mut resp = self.inner.into_response();
        resp.set_status(self.status);
        resp
    }
}

/// Returned by [`with_body`](IntoResponse::with_body) method.
pub struct WithBody<T> {
    inner: T,
    body: Body,
}

impl<T: IntoResponse> IntoResponse for WithBody<T> {
    fn into_response(self) -> Response {
        let mut resp = self.inner.into_response();
        resp.set_body(self.body);
        resp
    }
}

impl IntoResponse for Response {
    fn into_response(self) -> Response {
        self
    }
}

impl IntoResponse for String {
    fn into_response(self) -> Response {
        Response::builder()
            .content_type("text/plain; charset=utf8")
            .body(self)
    }
}

impl IntoResponse for &'static str {
    fn into_response(self) -> Response {
        Response::builder()
            .content_type("text/plain; charset=utf8")
            .body(self)
    }
}

impl IntoResponse for &'static [u8] {
    fn into_response(self) -> Response {
        Response::builder()
            .content_type("application/octet-stream")
            .body(self)
    }
}

impl IntoResponse for Bytes {
    fn into_response(self) -> Response {
        Response::builder()
            .content_type("application/octet-stream")
            .body(self)
    }
}

impl IntoResponse for Vec<u8> {
    fn into_response(self) -> Response {
        Response::builder()
            .content_type("application/octet-stream")
            .body(self)
    }
}

impl IntoResponse for () {
    fn into_response(self) -> Response {
        Response::builder().body(Body::empty())
    }
}

impl IntoResponse for Body {
    fn into_response(self) -> Response {
        Response::builder().body(self)
    }
}

impl IntoResponse for StatusCode {
    fn into_response(self) -> Response {
        Response::builder().status(self).finish()
    }
}

impl<T: IntoResponse> IntoResponse for (StatusCode, T) {
    fn into_response(self) -> Response {
        let mut resp = self.1.into_response();
        resp.set_status(self.0);
        resp
    }
}

impl<T: IntoResponse> IntoResponse for (StatusCode, HeaderMap, T) {
    fn into_response(self) -> Response {
        let mut resp = self.2.into_response();
        resp.set_status(self.0);
        resp.headers_mut().extend(self.1.into_iter());
        resp
    }
}

impl<T: IntoResponse> IntoResponse for (HeaderMap, T) {
    fn into_response(self) -> Response {
        let mut resp = self.1.into_response();
        resp.headers_mut().extend(self.0.into_iter());
        resp
    }
}

/// An HTML response.
#[derive(Debug, Clone, Eq, PartialEq, Default)]
pub struct Html<T>(pub T);

impl<T: Into<String> + Send> IntoResponse for Html<T> {
    fn into_response(self) -> Response {
        Response::builder()
            .content_type("text/html")
            .body(self.0.into())
    }
}

#[async_trait::async_trait]
impl<'a> FromRequest<'a> for &'a Request {
    async fn from_request(req: &'a Request, _body: &mut RequestBody) -> Result<Self> {
        Ok(req)
    }
}

#[async_trait::async_trait]
impl<'a> FromRequest<'a> for &'a Uri {
    async fn from_request(req: &'a Request, _body: &mut RequestBody) -> Result<Self> {
        Ok(req.uri())
    }
}

#[async_trait::async_trait]
impl<'a> FromRequest<'a> for Method {
    async fn from_request(req: &'a Request, _body: &mut RequestBody) -> Result<Self> {
        Ok(req.method().clone())
    }
}

#[async_trait::async_trait]
impl<'a> FromRequest<'a> for Version {
    async fn from_request(req: &'a Request, _body: &mut RequestBody) -> Result<Self> {
        Ok(req.version())
    }
}

#[async_trait::async_trait]
impl<'a> FromRequest<'a> for &'a HeaderMap {
    async fn from_request(req: &'a Request, _body: &mut RequestBody) -> Result<Self> {
        Ok(req.headers())
    }
}

#[async_trait::async_trait]
impl<'a> FromRequest<'a> for Body {
    async fn from_request(_req: &'a Request, body: &mut RequestBody) -> Result<Self> {
        Ok(body.take()?)
    }
}

#[async_trait::async_trait]
impl<'a> FromRequest<'a> for String {
    async fn from_request(_req: &'a Request, body: &mut RequestBody) -> Result<Self> {
        let data = body.take()?.into_bytes().await?;
        Ok(String::from_utf8(data.to_vec()).map_err(ReadBodyError::Utf8)?)
    }
}

#[async_trait::async_trait]
impl<'a> FromRequest<'a> for Bytes {
    async fn from_request(_req: &'a Request, body: &mut RequestBody) -> Result<Self> {
        Ok(body.take()?.into_bytes().await?)
    }
}

#[async_trait::async_trait]
impl<'a> FromRequest<'a> for Vec<u8> {
    async fn from_request(_req: &'a Request, body: &mut RequestBody) -> Result<Self> {
        Ok(body.take()?.into_vec().await?)
    }
}

#[async_trait::async_trait]
impl<'a> FromRequest<'a> for &'a RemoteAddr {
    async fn from_request(req: &'a Request, _body: &mut RequestBody) -> Result<Self> {
        Ok(&req.state().remote_addr)
    }
}

#[async_trait::async_trait]
impl<'a> FromRequest<'a> for &'a LocalAddr {
    async fn from_request(req: &'a Request, _body: &mut RequestBody) -> Result<Self> {
        Ok(&req.state().local_addr)
    }
}

#[async_trait::async_trait]
impl<'a, T: FromRequest<'a>> FromRequest<'a> for Option<T> {
    async fn from_request(req: &'a Request, body: &mut RequestBody) -> Result<Self> {
        Ok(T::from_request(req, body).await.ok())
    }
}

#[async_trait::async_trait]
impl<'a, T: FromRequest<'a>> FromRequest<'a> for Result<T> {
    async fn from_request(req: &'a Request, body: &mut RequestBody) -> Result<Self> {
        Ok(T::from_request(req, body).await)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::Addr;

    #[tokio::test]
    async fn into_response() {
        // String
        let resp = "abc".to_string().into_response();
        assert_eq!(resp.status(), StatusCode::OK);
        assert_eq!(resp.into_body().into_string().await.unwrap(), "abc");

        // &'static str
        let resp = "abc".into_response();
        assert_eq!(resp.status(), StatusCode::OK);
        assert_eq!(resp.into_body().into_string().await.unwrap(), "abc");

        // &'static [u8]
        let resp = [1, 2, 3].into_response();
        assert_eq!(resp.status(), StatusCode::OK);
        assert_eq!(resp.into_body().into_vec().await.unwrap(), &[1, 2, 3]);

        // Bytes
        let resp = Bytes::from_static(&[1, 2, 3]).into_response();
        assert_eq!(resp.status(), StatusCode::OK);
        assert_eq!(resp.into_body().into_vec().await.unwrap(), &[1, 2, 3]);

        // Vec<u8>
        let resp = vec![1, 2, 3].into_response();
        assert_eq!(resp.status(), StatusCode::OK);
        assert_eq!(resp.into_body().into_vec().await.unwrap(), &[1, 2, 3]);

        // ()
        let resp = ().into_response();
        assert_eq!(resp.status(), StatusCode::OK);
        assert_eq!(resp.into_body().into_vec().await.unwrap(), &[] as &[u8]);

        // (StatusCode, T)
        let resp = (StatusCode::BAD_GATEWAY, "abc").into_response();
        assert_eq!(resp.status(), StatusCode::BAD_GATEWAY);
        assert_eq!(resp.into_body().into_string().await.unwrap(), "abc");

        // (HeaderMap, T)
        let resp = Response::builder()
            .status(StatusCode::BAD_GATEWAY)
            .header("Value1", "567")
            .body("abc");
        let mut headers = HeaderMap::new();
        headers.append("Value2", HeaderValue::from_static("123"));
        let resp = (headers, resp).into_response();
        assert_eq!(resp.status(), StatusCode::BAD_GATEWAY);
        assert_eq!(
            resp.headers().get("Value1"),
            Some(&HeaderValue::from_static("567"))
        );
        assert_eq!(
            resp.headers().get("Value2"),
            Some(&HeaderValue::from_static("123"))
        );
        assert_eq!(resp.into_body().into_string().await.unwrap(), "abc");

        // (StatusCode, HeaderMap, T)
        let resp = Response::builder()
            .status(StatusCode::OK)
            .header("Value1", "567")
            .body("abc");
        let mut headers = HeaderMap::new();
        headers.append("Value2", HeaderValue::from_static("123"));
        let resp = (StatusCode::BAD_GATEWAY, headers, resp).into_response();
        assert_eq!(resp.status(), StatusCode::BAD_GATEWAY);
        assert_eq!(
            resp.headers().get("Value1"),
            Some(&HeaderValue::from_static("567"))
        );
        assert_eq!(
            resp.headers().get("Value2"),
            Some(&HeaderValue::from_static("123"))
        );
        assert_eq!(resp.into_body().into_string().await.unwrap(), "abc");

        // StatusCode
        let resp = StatusCode::CREATED.into_response();
        assert_eq!(resp.status(), StatusCode::CREATED);
        assert!(resp.into_body().into_string().await.unwrap().is_empty());

        // Html
        let resp = Html("abc").into_response();
        assert_eq!(resp.status(), StatusCode::OK);
        assert_eq!(resp.content_type(), Some("text/html"));
        assert_eq!(resp.into_body().into_string().await.unwrap(), "abc");

        // Json
        let resp = Json(serde_json::json!({ "a": 1, "b": 2})).into_response();
        assert_eq!(resp.status(), StatusCode::OK);
        assert_eq!(resp.content_type(), Some("application/json"));
        assert_eq!(
            resp.into_body().into_string().await.unwrap(),
            r#"{"a":1,"b":2}"#
        );

        // WithBody
        let resp = StatusCode::CONFLICT.with_body("abc").into_response();
        assert_eq!(resp.status(), StatusCode::CONFLICT);
        assert_eq!(resp.into_body().into_string().await.unwrap(), "abc");

        // WithHeader
        let resp = Response::builder()
            .header("Value1", "123")
            .finish()
            .with_header("Value2", "456")
            .with_header("Value3", "789")
            .into_response();
        assert_eq!(resp.status(), StatusCode::OK);
        assert_eq!(
            resp.headers().get("Value1"),
            Some(&HeaderValue::from_static("123"))
        );
        assert_eq!(
            resp.headers().get("Value2"),
            Some(&HeaderValue::from_static("456"))
        );
        assert_eq!(
            resp.headers().get("Value3"),
            Some(&HeaderValue::from_static("789"))
        );

        // WithStatus
        let resp = StatusCode::CONFLICT
            .with_status(StatusCode::BAD_GATEWAY)
            .into_response();
        assert_eq!(resp.status(), StatusCode::BAD_GATEWAY);
        assert!(resp.into_body().into_string().await.unwrap().is_empty());
    }

    #[tokio::test]
    async fn from_request() {
        fn request() -> Request {
            let mut req = Request::builder()
                .version(Version::HTTP_11)
                .method(Method::DELETE)
                .header("Value1", "123")
                .header("Value2", "456")
                .uri(Uri::from_static("http://example.com/a/b"))
                .body("abc");
            req.state_mut().remote_addr = RemoteAddr(Addr::custom("test", "example"));
            req.state_mut().local_addr = LocalAddr(Addr::custom("test", "example-local"));
            req
        }

        let req = request();
        let (req, mut body) = req.split();

        // Version
        assert_eq!(
            Version::from_request(&req, &mut body).await.unwrap(),
            Version::HTTP_11
        );

        // &HeaderMap
        assert_eq!(
            <&HeaderMap>::from_request(&req, &mut body).await.unwrap(),
            &{
                let mut headers = HeaderMap::new();
                headers.append("Value1", HeaderValue::from_static("123"));
                headers.append("Value2", HeaderValue::from_static("456"));
                headers
            }
        );

        // &Uri
        assert_eq!(
            <&Uri>::from_request(&req, &mut body).await.unwrap(),
            &Uri::from_static("http://example.com/a/b")
        );

        // &RemoteAddr
        assert_eq!(
            <&RemoteAddr>::from_request(&req, &mut body).await.unwrap(),
            &RemoteAddr(Addr::custom("test", "example"))
        );

        // &LocalAddr
        assert_eq!(
            <&LocalAddr>::from_request(&req, &mut body).await.unwrap(),
            &LocalAddr(Addr::custom("test", "example-local"))
        );

        // &Method
        assert_eq!(
            <Method>::from_request(&req, &mut body).await.unwrap(),
            Method::DELETE
        );

        // String
        let req = request();
        let (req, mut body) = req.split();
        assert_eq!(
            String::from_request(&req, &mut body).await.unwrap(),
            "abc".to_string()
        );

        // Vec<u8>
        let req = request();
        let (req, mut body) = req.split();
        assert_eq!(
            <Vec<u8>>::from_request(&req, &mut body).await.unwrap(),
            b"abc"
        );

        // Bytes
        let req = request();
        let (req, mut body) = req.split();
        assert_eq!(
            Bytes::from_request(&req, &mut body).await.unwrap(),
            Bytes::from_static(b"abc")
        );
    }
}
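// Illustrative check (an addition, not part of the original file; it assumes
// `Body` has a `From<&'static str>` impl, which the builder calls above rely
// on): a `RequestBody` hands out its inner `Body` exactly once, and the second
// `take` reports `ReadBodyError::BodyHasBeenTaken`. This is the mechanism that
// protects against two body-consuming extractors running in one handler.
#[cfg(test)]
mod request_body_sketch {
    use super::*;

    #[tokio::test]
    async fn body_can_only_be_taken_once() {
        let mut body = RequestBody::new(Body::from("abc"));
        assert!(body.is_some());
        assert!(body.take().is_ok());
        assert!(body.is_none());
        assert!(body.take().is_err());
    }
}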
28.638517
99
0.575806
675fe9a4c751e6a8a994d0852d40f15e368321c5
36,648
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

use crate::error::TeaclaveManagementServiceError;
use anyhow::{anyhow, Result};
use std::collections::HashMap;
use std::convert::TryInto;
use std::prelude::v1::*;
use std::sync::{Arc, SgxMutex as Mutex};
use teaclave_proto::teaclave_frontend_service::{
    ApproveTaskRequest, ApproveTaskResponse, AssignDataRequest, AssignDataResponse,
    CancelTaskRequest, CancelTaskResponse, CreateTaskRequest, CreateTaskResponse,
    DeleteFunctionRequest, DeleteFunctionResponse, GetFunctionRequest, GetFunctionResponse,
    GetInputFileRequest, GetInputFileResponse, GetOutputFileRequest, GetOutputFileResponse,
    GetTaskRequest, GetTaskResponse, InvokeTaskRequest, InvokeTaskResponse, ListFunctionsRequest,
    ListFunctionsResponse, RegisterFunctionRequest, RegisterFunctionResponse,
    RegisterFusionOutputRequest, RegisterFusionOutputResponse, RegisterInputFileRequest,
    RegisterInputFileResponse, RegisterInputFromOutputRequest, RegisterInputFromOutputResponse,
    RegisterOutputFileRequest, RegisterOutputFileResponse, UpdateFunctionRequest,
    UpdateFunctionResponse, UpdateInputFileRequest, UpdateInputFileResponse,
    UpdateOutputFileRequest, UpdateOutputFileResponse,
};
use teaclave_proto::teaclave_management_service::TeaclaveManagement;
use teaclave_proto::teaclave_storage_service::{
    DeleteRequest, EnqueueRequest, GetRequest, PutRequest, TeaclaveStorageClient,
};
use teaclave_rpc::endpoint::Endpoint;
use teaclave_rpc::Request;
use teaclave_service_enclave_utils::{ensure, teaclave_service};
use teaclave_types::*;
use url::Url;
use uuid::Uuid;

#[teaclave_service(
    teaclave_management_service,
    TeaclaveManagement,
    TeaclaveManagementServiceError
)]
#[derive(Clone)]
pub(crate) struct TeaclaveManagementService {
    storage_client: Arc<Mutex<TeaclaveStorageClient>>,
}

impl TeaclaveManagement for TeaclaveManagementService {
    // access control: none
    fn register_input_file(
        &self,
        request: Request<RegisterInputFileRequest>,
    ) -> TeaclaveServiceResponseResult<RegisterInputFileResponse> {
        let user_id = self.get_request_user_id(request.metadata())?;
        let request = request.message;
        let input_file = TeaclaveInputFile::new(
            request.url,
            request.cmac,
            request.crypto_info,
            vec![user_id],
        );

        self.write_to_db(&input_file)
            .map_err(|_| TeaclaveManagementServiceError::StorageError)?;

        let response = RegisterInputFileResponse::new(input_file.external_id());
        Ok(response)
    }

    // access control:
    // 1) existing_file.owner_list.len() == 1
    // 2) user_id in existing_file.owner_list
    fn update_input_file(
        &self,
        request: Request<UpdateInputFileRequest>,
    ) -> TeaclaveServiceResponseResult<UpdateInputFileResponse> {
        let user_id = self.get_request_user_id(request.metadata())?;
        let request = request.message;

        let old_input_file: TeaclaveInputFile = self
            .read_from_db(&request.data_id)
            .map_err(|_| TeaclaveManagementServiceError::PermissionDenied)?;

        ensure!(
            old_input_file.owner == OwnerList::from(vec![user_id]),
            TeaclaveManagementServiceError::PermissionDenied
        );

        let input_file = TeaclaveInputFile::new(
            request.url,
            old_input_file.cmac,
            old_input_file.crypto_info,
            old_input_file.owner,
        );

        self.write_to_db(&input_file)
            .map_err(|_| TeaclaveManagementServiceError::StorageError)?;

        let response = UpdateInputFileResponse::new(input_file.external_id());
        Ok(response)
    }

    // access control: none
    fn register_output_file(
        &self,
        request: Request<RegisterOutputFileRequest>,
    ) -> TeaclaveServiceResponseResult<RegisterOutputFileResponse> {
        let user_id = self.get_request_user_id(request.metadata())?;
        let request = request.message;
        let output_file = TeaclaveOutputFile::new(request.url, request.crypto_info, vec![user_id]);

        self.write_to_db(&output_file)
            .map_err(|_| TeaclaveManagementServiceError::StorageError)?;

        let response = RegisterOutputFileResponse::new(output_file.external_id());
        Ok(response)
    }

    // access control:
    // 1) existing_file.owner_list.len() == 1
    // 2) user_id in existing_file.owner_list
    fn update_output_file(
        &self,
        request: Request<UpdateOutputFileRequest>,
    ) -> TeaclaveServiceResponseResult<UpdateOutputFileResponse> {
        let user_id = self.get_request_user_id(request.metadata())?;
        let request = request.message;

        let old_output_file: TeaclaveOutputFile = self
            .read_from_db(&request.data_id)
            .map_err(|_| TeaclaveManagementServiceError::PermissionDenied)?;

        ensure!(
            old_output_file.owner == OwnerList::from(vec![user_id]),
            TeaclaveManagementServiceError::PermissionDenied
        );

        let output_file = TeaclaveOutputFile::new(
            request.url,
            old_output_file.crypto_info,
            old_output_file.owner,
        );

        self.write_to_db(&output_file)
            .map_err(|_| TeaclaveManagementServiceError::StorageError)?;

        let response = UpdateOutputFileResponse::new(output_file.external_id());
        Ok(response)
    }

    // access control: user_id in owner_list
    fn register_fusion_output(
        &self,
        request: Request<RegisterFusionOutputRequest>,
    ) -> TeaclaveServiceResponseResult<RegisterFusionOutputResponse> {
        let user_id = self.get_request_user_id(request.metadata())?;
        let owner_list = request.message.owner_list;
        ensure!(
            owner_list.len() > 1 && owner_list.contains(&user_id),
            TeaclaveManagementServiceError::PermissionDenied
        );

        let output_file = self
            .create_fusion_data(owner_list)
            .map_err(|_| TeaclaveManagementServiceError::DataError)?;

        self.write_to_db(&output_file)
            .map_err(|_| TeaclaveManagementServiceError::StorageError)?;

        let response = RegisterFusionOutputResponse::new(output_file.external_id());
        Ok(response)
    }

    // access control:
    // 1) user_id in output.owner
    // 2) cmac != none
    fn register_input_from_output(
        &self,
        request: Request<RegisterInputFromOutputRequest>,
    ) -> TeaclaveServiceResponseResult<RegisterInputFromOutputResponse> {
        let user_id = self.get_request_user_id(request.metadata())?;
        let output: TeaclaveOutputFile = self
            .read_from_db(&request.message.data_id)
            .map_err(|_| TeaclaveManagementServiceError::PermissionDenied)?;

        ensure!(
            output.owner.contains(&user_id),
            TeaclaveManagementServiceError::PermissionDenied
        );

        let input = TeaclaveInputFile::from_output(output)
            .map_err(|_| TeaclaveManagementServiceError::PermissionDenied)?;

        self.write_to_db(&input)
            .map_err(|_| TeaclaveManagementServiceError::StorageError)?;

        let response = RegisterInputFromOutputResponse::new(input.external_id());
        Ok(response)
    }

    // access control: output_file.owner contains user_id
    fn get_output_file(
        &self,
        request: Request<GetOutputFileRequest>,
    ) ->
TeaclaveServiceResponseResult<GetOutputFileResponse> { let user_id = self.get_request_user_id(request.metadata())?; let output_file: TeaclaveOutputFile = self .read_from_db(&request.message.data_id) .map_err(|_| TeaclaveManagementServiceError::PermissionDenied)?; ensure!( output_file.owner.contains(&user_id), TeaclaveManagementServiceError::PermissionDenied ); let response = GetOutputFileResponse::new(output_file.owner, output_file.cmac); Ok(response) } // access control: input_file.owner contains user_id fn get_input_file( &self, request: Request<GetInputFileRequest>, ) -> TeaclaveServiceResponseResult<GetInputFileResponse> { let user_id = self.get_request_user_id(request.metadata())?; let input_file: TeaclaveInputFile = self .read_from_db(&request.message.data_id) .map_err(|_| TeaclaveManagementServiceError::PermissionDenied)?; ensure!( input_file.owner.contains(&user_id), TeaclaveManagementServiceError::PermissionDenied ); let response = GetInputFileResponse::new(input_file.owner, input_file.cmac); Ok(response) } // access_control: none fn register_function( &self, request: Request<RegisterFunctionRequest>, ) -> TeaclaveServiceResponseResult<RegisterFunctionResponse> { let user_id = self.get_request_user_id(request.metadata())?; let function = FunctionBuilder::from(request.message) .id(Uuid::new_v4()) .owner(user_id.clone()) .build(); self.write_to_db(&function) .map_err(|_| TeaclaveManagementServiceError::StorageError)?; let mut u = User::default(); u.id = user_id; let external_id = u.external_id(); let user: Result<User> = self.read_from_db(&external_id); match user { Ok(mut us) => { us.registered_functions .push(function.external_id().to_string()); self.write_to_db(&us) .map_err(|_| TeaclaveManagementServiceError::StorageError)?; } Err(_) => { u.registered_functions .push(function.external_id().to_string()); self.write_to_db(&u) .map_err(|_| TeaclaveManagementServiceError::StorageError)?; } } // Update allowed function list for users for user_id in &function.user_allowlist { let mut u = User::default(); u.id = user_id.into(); let external_id = u.external_id(); let user: Result<User> = self.read_from_db(&external_id); match user { Ok(mut us) => { us.allowed_functions .push(function.external_id().to_string()); self.write_to_db(&us) .map_err(|_| TeaclaveManagementServiceError::StorageError)?; } Err(_) => { u.allowed_functions.push(function.external_id().to_string()); self.write_to_db(&u) .map_err(|_| TeaclaveManagementServiceError::StorageError)?; } } } let response = RegisterFunctionResponse::new(function.external_id()); Ok(response) } fn update_function( &self, request: Request<UpdateFunctionRequest>, ) -> TeaclaveServiceResponseResult<UpdateFunctionResponse> { let user_id = self.get_request_user_id(request.metadata())?; let function = FunctionBuilder::from(request.message) .owner(user_id) .build(); self.write_to_db(&function) .map_err(|_| TeaclaveManagementServiceError::StorageError)?; let response = UpdateFunctionResponse::new(function.external_id()); Ok(response) } // access control: function.public || function.owner == user_id fn get_function( &self, request: Request<GetFunctionRequest>, ) -> TeaclaveServiceResponseResult<GetFunctionResponse> { let user_id = self.get_request_user_id(request.metadata())?; let function: Function = self .read_from_db(&request.message.function_id) .map_err(|_| TeaclaveManagementServiceError::PermissionDenied)?; if function.public || function.owner == user_id { let response = GetFunctionResponse { name: function.name, description: function.description, 
                owner: function.owner,
                executor_type: function.executor_type,
                payload: function.payload,
                public: function.public,
                arguments: function.arguments,
                inputs: function.inputs,
                outputs: function.outputs,
                user_allowlist: function.user_allowlist,
            };
            Ok(response)
        } else if !function.public && function.user_allowlist.contains(&user_id.into()) {
            let response = GetFunctionResponse {
                name: function.name,
                description: function.description,
                owner: function.owner,
                executor_type: function.executor_type,
                payload: vec![],
                public: function.public,
                arguments: function.arguments,
                inputs: function.inputs,
                outputs: function.outputs,
                user_allowlist: vec![],
            };
            Ok(response)
        } else {
            Err(TeaclaveManagementServiceError::PermissionDenied.into())
        }
    }

    // access control: function.owner == user_id
    fn delete_function(
        &self,
        request: Request<DeleteFunctionRequest>,
    ) -> TeaclaveServiceResponseResult<DeleteFunctionResponse> {
        let user_id = self.get_request_user_id(request.metadata())?;

        let function: Function = self
            .read_from_db(&request.message.function_id)
            .map_err(|_| TeaclaveManagementServiceError::PermissionDenied)?;

        ensure!(
            function.owner == user_id,
            TeaclaveManagementServiceError::PermissionDenied
        );

        self.delete_from_db(&request.message.function_id)
            .map_err(|_| TeaclaveManagementServiceError::PermissionDenied)?;

        let response = DeleteFunctionResponse {};
        Ok(response)
    }

    // access control: user_id == request.user_id
    fn list_functions(
        &self,
        request: Request<ListFunctionsRequest>,
    ) -> TeaclaveServiceResponseResult<ListFunctionsResponse> {
        let mut request_user_id = request.message.user_id.clone();
        let current_user_id = self.get_request_user_id(request.metadata())?;
        let role = self.get_request_role(request.metadata())?;

        if role != UserRole::PlatformAdmin {
            ensure!(
                request_user_id == current_user_id,
                TeaclaveManagementServiceError::PermissionDenied
            );
        }

        if let UserRole::DataOwner(s) = role {
            request_user_id = s.into();
        }

        let mut u = User::default();
        u.id = request_user_id;
        let external_id = u.external_id();

        let user: Result<User> = self.read_from_db(&external_id);
        match user {
            Ok(us) => {
                let response = ListFunctionsResponse {
                    registered_functions: us.registered_functions,
                    allowed_functions: us.allowed_functions,
                };
                Ok(response)
            }
            Err(_) => {
                let response = ListFunctionsResponse::default();
                Ok(response)
            }
        }
    }

    // access control: none
    // when a task is created, the following rules will be verified:
    // 1) arguments match the function definition
    // 2) inputs match the function definition
    // 3) outputs match the function definition
    // 4) requested user_id in the user_allowlist
    fn create_task(
        &self,
        request: Request<CreateTaskRequest>,
    ) -> TeaclaveServiceResponseResult<CreateTaskResponse> {
        let user_id = self.get_request_user_id(request.metadata())?;
        let role = self.get_request_role(request.metadata())?;
        let request = request.message;
        let function: Function = self
            .read_from_db(&request.function_id)
            .map_err(|_| TeaclaveManagementServiceError::PermissionDenied)?;

        match role {
            UserRole::DataOwner(a) | UserRole::DataOwnerManager(a) => {
                ensure!(
                    (function.public || function.user_allowlist.contains(&a)),
                    TeaclaveManagementServiceError::PermissionDenied
                );
            }
            UserRole::PlatformAdmin => (),
            _ => {
                return Err(TeaclaveManagementServiceError::PermissionDenied.into());
            }
        }

        let task = Task::<Create>::new(
            user_id,
            request.executor,
            request.function_arguments,
            request.inputs_ownership,
            request.outputs_ownership,
            function,
        )
        .map_err(|_| TeaclaveManagementServiceError::BadTask)?;

        log::debug!("CreateTask: {:?}", task);
        let ts: TaskState = task.into();
        self.write_to_db(&ts)
            .map_err(|_| TeaclaveManagementServiceError::StorageError)?;

        let response = CreateTaskResponse::new(ts.external_id());
        Ok(response)
    }

    // access control: task.participants.contains(&user_id)
    fn get_task(
        &self,
        request: Request<GetTaskRequest>,
    ) -> TeaclaveServiceResponseResult<GetTaskResponse> {
        let user_id = self.get_request_user_id(request.metadata())?;

        let ts: TaskState = self
            .read_from_db(&request.message.task_id)
            .map_err(|_| TeaclaveManagementServiceError::PermissionDenied)?;

        ensure!(
            ts.has_participant(&user_id),
            TeaclaveManagementServiceError::PermissionDenied
        );

        log::debug!("GetTask: {:?}", ts);

        let response = GetTaskResponse {
            task_id: ts.external_id(),
            creator: ts.creator,
            function_id: ts.function_id,
            function_owner: ts.function_owner,
            function_arguments: ts.function_arguments,
            inputs_ownership: ts.inputs_ownership,
            outputs_ownership: ts.outputs_ownership,
            participants: ts.participants,
            approved_users: ts.approved_users,
            assigned_inputs: ts.assigned_inputs.external_ids(),
            assigned_outputs: ts.assigned_outputs.external_ids(),
            result: ts.result,
            status: ts.status,
        };
        Ok(response)
    }

    // access control:
    // 1) task.participants.contains(user_id)
    // 2) task.status == Created
    // 3) user can use the data:
    // * input file: input_file.owner contains user_id
    // * output file: output_file.owner contains user_id && output_file.cmac.is_none()
    // 4) the data can be assigned to the task:
    // * inputs_ownership or outputs_ownership contains the data name
    // * input file: OwnerList matches input_file.owner
    // * output file: OwnerList matches output_file.owner
    fn assign_data(
        &self,
        request: Request<AssignDataRequest>,
    ) -> TeaclaveServiceResponseResult<AssignDataResponse> {
        let user_id = self.get_request_user_id(request.metadata())?;
        let request = request.message;
        let ts: TaskState = self
            .read_from_db(&request.task_id)
            .map_err(|_| TeaclaveManagementServiceError::PermissionDenied)?;

        ensure!(
            ts.has_participant(&user_id),
            TeaclaveManagementServiceError::PermissionDenied
        );

        let mut task: Task<Assign> = ts.try_into().map_err(|e| {
            log::warn!("Assign state error: {:?}", e);
            TeaclaveManagementServiceError::PermissionDenied
        })?;

        for (data_name, data_id) in request.inputs.iter() {
            let file: TeaclaveInputFile = self
                .read_from_db(&data_id)
                .map_err(|_| TeaclaveManagementServiceError::PermissionDenied)?;
            task.assign_input(&user_id, data_name, file)
                .map_err(|_| TeaclaveManagementServiceError::PermissionDenied)?;
        }

        for (data_name, data_id) in request.outputs.iter() {
            let file: TeaclaveOutputFile = self
                .read_from_db(&data_id)
                .map_err(|_| TeaclaveManagementServiceError::PermissionDenied)?;
            task.assign_output(&user_id, data_name, file)
                .map_err(|_| TeaclaveManagementServiceError::PermissionDenied)?;
        }

        log::debug!("AssignData: {:?}", task);
        let ts: TaskState = task.into();
        self.write_to_db(&ts)
            .map_err(|_| TeaclaveManagementServiceError::StorageError)?;

        Ok(AssignDataResponse)
    }

    // access_control:
    // 1) task status == Ready
    // 2) user_id in task.participants
    fn approve_task(
        &self,
        request: Request<ApproveTaskRequest>,
    ) -> TeaclaveServiceResponseResult<ApproveTaskResponse> {
        let user_id = self.get_request_user_id(request.metadata())?;
        let request = request.message;

        let ts: TaskState = self
            .read_from_db(&request.task_id)
            .map_err(|_| TeaclaveManagementServiceError::PermissionDenied)?;

        let mut task: Task<Approve> = ts.try_into().map_err(|e| {
            log::warn!("Approve state error: {:?}", e);
            TeaclaveManagementServiceError::PermissionDenied
        })?;

        task.approve(&user_id)
            .map_err(|_| TeaclaveManagementServiceError::PermissionDenied)?;

        log::debug!("ApproveTask: approve:{:?}", task);

        let ts: TaskState = task.into();
        self.write_to_db(&ts)
            .map_err(|_| TeaclaveManagementServiceError::StorageError)?;

        Ok(ApproveTaskResponse)
    }

    // access_control:
    // 1) task status == Approved
    // 2) user_id == task.creator
    fn invoke_task(
        &self,
        request: Request<InvokeTaskRequest>,
    ) -> TeaclaveServiceResponseResult<InvokeTaskResponse> {
        let user_id = self.get_request_user_id(request.metadata())?;
        let request = request.message;

        let ts: TaskState = self
            .read_from_db(&request.task_id)
            .map_err(|_| TeaclaveManagementServiceError::PermissionDenied)?;

        // Early validation
        ensure!(
            ts.has_creator(&user_id),
            TeaclaveManagementServiceError::PermissionDenied
        );

        let function: Function = self
            .read_from_db(&ts.function_id)
            .map_err(|_| TeaclaveManagementServiceError::PermissionDenied)?;

        log::debug!("InvokeTask: get function: {:?}", function);

        let mut task: Task<Stage> = ts.try_into().map_err(|e| {
            log::warn!("Stage state error: {:?}", e);
            TeaclaveManagementServiceError::PermissionDenied
        })?;

        log::debug!("InvokeTask: get task: {:?}", task);

        let staged_task = task.stage_for_running(&user_id, function)?;

        log::debug!("InvokeTask: staged task: {:?}", staged_task);

        self.enqueue_to_db(StagedTask::get_queue_key().as_bytes(), &staged_task)?;

        let ts: TaskState = task.into();
        self.write_to_db(&ts)
            .map_err(|_| TeaclaveManagementServiceError::StorageError)?;
        Ok(InvokeTaskResponse)
    }

    // access_control:
    // 1) user_id == task.creator, or
    // 2) user_role == admin
    fn cancel_task(
        &self,
        request: Request<CancelTaskRequest>,
    ) -> TeaclaveServiceResponseResult<CancelTaskResponse> {
        let user_id = self.get_request_user_id(request.metadata())?;
        let role = self.get_request_role(request.metadata())?;
        let request = request.message;

        let ts: TaskState = self
            .read_from_db(&request.task_id)
            .map_err(|_| TeaclaveManagementServiceError::PermissionDenied)?;

        match role {
            UserRole::PlatformAdmin => {}
            _ => {
                ensure!(
                    ts.has_creator(&user_id),
                    TeaclaveManagementServiceError::PermissionDenied
                );
            }
        }

        match ts.status {
            // need scheduler to cancel the task
            TaskStatus::Staged | TaskStatus::Running => {
                self.enqueue_to_db(CANCEL_QUEUE_KEY.as_bytes(), &ts)?;
            }
            _ => {
                // early cancellation
                // race will not affect correctness/privacy
                let mut task: Task<Cancel> = ts.try_into().map_err(|e| {
                    log::warn!("Cancel state error: {:?}", e);
                    TeaclaveManagementServiceError::PermissionDenied
                })?;

                log::debug!("Canceled Task: {:?}", task);

                task.update_result(TaskResult::Err(TaskFailure {
                    reason: "Task canceled".to_string(),
                }))
                .map_err(|_| TeaclaveManagementServiceError::PermissionDenied)?;

                let ts: TaskState = task.into();
                self.write_to_db(&ts)
                    .map_err(|_| TeaclaveManagementServiceError::StorageError)?;

                log::warn!("Canceled Task: written back");
            }
        }

        Ok(CancelTaskResponse)
    }
}

impl TeaclaveManagementService {
    pub(crate) fn new(storage_service_endpoint: Endpoint) -> Result<Self> {
        let mut i = 0;
        let channel = loop {
            match storage_service_endpoint.connect() {
                Ok(channel) => break channel,
                Err(_) => {
                    anyhow::ensure!(i < 10, "failed to connect to storage service");
                    log::warn!("Failed to connect to storage service, retry {}", i);
                    i += 1;
                }
            }
            std::thread::sleep(std::time::Duration::from_secs(3));
        };
        let storage_client = Arc::new(Mutex::new(TeaclaveStorageClient::new(channel)?));
        let service = Self { storage_client };

        #[cfg(test_mode)]
        service.add_mock_data()?;

        Ok(service)
    }

    pub fn create_fusion_data(&self, owners: impl Into<OwnerList>) -> Result<TeaclaveOutputFile> {
        let
uuid = Uuid::new_v4(); let url = format!("fusion:///TEACLAVE_FUSION_BASE/{}.fusion", uuid.to_string()); let url = Url::parse(&url).map_err(|_| anyhow!("invalid url"))?; let crypto_info = FileCrypto::default(); Ok(TeaclaveOutputFile::new(url, crypto_info, owners)) } fn get_request_user_id( &self, meta: &HashMap<String, String>, ) -> TeaclaveServiceResponseResult<UserID> { let user_id = meta .get("id") .ok_or(TeaclaveManagementServiceError::InvalidRequest)?; Ok(user_id.to_string().into()) } fn get_request_role( &self, meta: &HashMap<String, String>, ) -> TeaclaveServiceResponseResult<UserRole> { let role = meta .get("role") .ok_or(TeaclaveManagementServiceError::InvalidRequest)?; Ok(UserRole::from_str(role)) } fn write_to_db(&self, item: &impl Storable) -> Result<()> { let k = item.key(); let v = item.to_vec()?; let put_request = PutRequest::new(k.as_slice(), v.as_slice()); let _put_response = self .storage_client .clone() .lock() .map_err(|_| anyhow!("Cannot lock storage client"))? .put(put_request)?; Ok(()) } fn read_from_db<T: Storable>(&self, key: &ExternalID) -> Result<T> { anyhow::ensure!(T::match_prefix(&key.prefix), "Key prefix doesn't match."); let request = GetRequest::new(key.to_bytes()); let response = self .storage_client .clone() .lock() .map_err(|_| anyhow!("Cannot lock storage client"))? .get(request)?; T::from_slice(response.value.as_slice()) } fn delete_from_db(&self, key: &ExternalID) -> Result<()> { let request = DeleteRequest::new(key.to_bytes()); self.storage_client .clone() .lock() .map_err(|_| anyhow!("Cannot lock storage client"))? .delete(request)?; Ok(()) } fn enqueue_to_db(&self, key: &[u8], item: &impl Storable) -> TeaclaveServiceResponseResult<()> { let value = item .to_vec() .map_err(|_| TeaclaveManagementServiceError::DataError)?; let enqueue_request = EnqueueRequest::new(key, value); let _enqueue_response = self .storage_client .clone() .lock() .map_err(|_| TeaclaveManagementServiceError::StorageError)? 
.enqueue(enqueue_request)?; Ok(()) } #[cfg(test_mode)] fn add_mock_data(&self) -> Result<()> { let mut output_file = self.create_fusion_data(vec!["mock_user1", "frontend_user"])?; output_file.uuid = Uuid::parse_str("00000000-0000-0000-0000-000000000001")?; output_file.cmac = Some(FileAuthTag::mock()); self.write_to_db(&output_file)?; let mut output_file = self.create_fusion_data(vec!["mock_user2", "mock_user3"])?; output_file.uuid = Uuid::parse_str("00000000-0000-0000-0000-000000000002")?; output_file.cmac = Some(FileAuthTag::mock()); self.write_to_db(&output_file)?; let mut input_file = TeaclaveInputFile::from_output(output_file)?; input_file.uuid = Uuid::parse_str("00000000-0000-0000-0000-000000000002")?; self.write_to_db(&input_file)?; let function_input = FunctionInput::new("input", "input_desc"); let function_output = FunctionOutput::new("output", "output_desc"); let function_input2 = FunctionInput::new("input2", "input_desc"); let function_output2 = FunctionOutput::new("output2", "output_desc"); let function = FunctionBuilder::new() .id(Uuid::parse_str("00000000-0000-0000-0000-000000000001").unwrap()) .name("mock-func-1") .description("mock-desc") .payload(b"mock-payload".to_vec()) .public(true) .arguments(vec!["arg1".to_string(), "arg2".to_string()]) .inputs(vec![function_input, function_input2]) .outputs(vec![function_output, function_output2]) .owner("teaclave".to_string()) .build(); self.write_to_db(&function)?; let function_output = FunctionOutput::new("output", "output_desc"); let function = FunctionBuilder::new() .id(Uuid::parse_str("00000000-0000-0000-0000-000000000002").unwrap()) .name("mock-func-2") .description("mock-desc") .payload(b"mock-payload".to_vec()) .public(true) .arguments(vec!["arg1".to_string()]) .outputs(vec![function_output]) .owner("teaclave".to_string()) .build(); self.write_to_db(&function)?; let function = FunctionBuilder::new() .id(Uuid::parse_str("00000000-0000-0000-0000-000000000003").unwrap()) .name("mock-func-3") .description("Private mock function") .payload(b"mock-payload".to_vec()) .public(false) .arguments(vec!["arg1".to_string()]) .owner("mock_user".to_string()) .user_allowlist(vec!["mock_user".to_string(), "mock_user1".to_string()]) .build(); self.write_to_db(&function)?; Ok(()) } } #[cfg(feature = "enclave_unit_test")] pub mod tests { use super::*; use serde_json::json; use std::collections::HashMap; use teaclave_types::{ hashmap, Executor, FileAuthTag, FileCrypto, FunctionArguments, FunctionInput, FunctionInputFile, FunctionOutput, FunctionOutputFile, }; use url::Url; pub fn handle_input_file() { let url = Url::parse("s3://bucket_id/path?token=mock_token").unwrap(); let cmac = FileAuthTag::mock(); let input_file = TeaclaveInputFile::new(url, cmac, FileCrypto::default(), vec!["mock_user"]); assert!(TeaclaveInputFile::match_prefix(&input_file.key_string())); let value = input_file.to_vec().unwrap(); let deserialized_file = TeaclaveInputFile::from_slice(&value).unwrap(); debug!("file: {:?}", deserialized_file); } pub fn handle_output_file() { let url = Url::parse("s3://bucket_id/path?token=mock_token").unwrap(); let output_file = TeaclaveOutputFile::new(url, FileCrypto::default(), vec!["mock_user"]); assert!(TeaclaveOutputFile::match_prefix(&output_file.key_string())); let value = output_file.to_vec().unwrap(); let deserialized_file = TeaclaveOutputFile::from_slice(&value).unwrap(); debug!("file: {:?}", deserialized_file); } pub fn handle_function() { let function_input = FunctionInput::new("input", "input_desc"); let function_output = 
FunctionOutput::new("output", "output_desc"); let function = FunctionBuilder::new() .id(Uuid::new_v4()) .name("mock_function") .description("mock function") .payload(b"python script".to_vec()) .arguments(vec!["arg".to_string()]) .inputs(vec![function_input]) .outputs(vec![function_output]) .public(true) .owner("mock_user") .build(); assert!(Function::match_prefix(&function.key_string())); let value = function.to_vec().unwrap(); let deserialized_function = Function::from_slice(&value).unwrap(); debug!("function: {:?}", deserialized_function); } pub fn handle_task() { let function = FunctionBuilder::new() .id(Uuid::new_v4()) .name("mock_function") .description("mock function") .payload(b"python script".to_vec()) .arguments(vec!["arg".to_string()]) .public(true) .owner("mock_user") .build(); let function_arguments = FunctionArguments::from_json(json!({"arg": "data"})).unwrap(); let task = Task::<Create>::new( UserID::from("mock_user"), Executor::MesaPy, function_arguments, HashMap::new(), HashMap::new(), function, ) .unwrap(); let ts: TaskState = task.try_into().unwrap(); let value = ts.to_vec().unwrap(); let deserialized_task = TaskState::from_slice(&value).unwrap(); debug!("task: {:?}", deserialized_task); } pub fn handle_staged_task() { let function = FunctionBuilder::new() .id(Uuid::new_v4()) .name("mock_function") .description("mock function") .payload(b"python script".to_vec()) .public(true) .owner("mock_user") .build(); let url = Url::parse("s3://bucket_id/path?token=mock_token").unwrap(); let cmac = FileAuthTag::mock(); let input_data = FunctionInputFile::new(url.clone(), cmac, FileCrypto::default()); let output_data = FunctionOutputFile::new(url, FileCrypto::default()); let staged_task = StagedTaskBuilder::new() .task_id(Uuid::new_v4()) .executor(Executor::MesaPy) .function_payload(function.payload) .function_arguments(hashmap!("arg" => "data")) .input_data(hashmap!("input" => input_data)) .output_data(hashmap!("output" => output_data)) .build(); let value = staged_task.to_vec().unwrap(); let deserialized_data = StagedTask::from_slice(&value).unwrap(); debug!("staged task: {:?}", deserialized_data); } }
36.980827
100
0.610238
874a2056520a50491c6987192dd4258ca4a11332
5,077
#[doc = "Register `EVENTS_TXDSENT` reader"] pub struct R(crate::R<EVENTS_TXDSENT_SPEC>); impl core::ops::Deref for R { type Target = crate::R<EVENTS_TXDSENT_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<EVENTS_TXDSENT_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<EVENTS_TXDSENT_SPEC>) -> Self { R(reader) } } #[doc = "Register `EVENTS_TXDSENT` writer"] pub struct W(crate::W<EVENTS_TXDSENT_SPEC>); impl core::ops::Deref for W { type Target = crate::W<EVENTS_TXDSENT_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<crate::W<EVENTS_TXDSENT_SPEC>> for W { #[inline(always)] fn from(writer: crate::W<EVENTS_TXDSENT_SPEC>) -> Self { W(writer) } } #[doc = "TWI TXD byte sent\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum EVENTS_TXDSENT_A { #[doc = "0: Event not generated"] NOTGENERATED = 0, #[doc = "1: Event generated"] GENERATED = 1, } impl From<EVENTS_TXDSENT_A> for bool { #[inline(always)] fn from(variant: EVENTS_TXDSENT_A) -> Self { variant as u8 != 0 } } #[doc = "Field `EVENTS_TXDSENT` reader - TWI TXD byte sent"] pub struct EVENTS_TXDSENT_R(crate::FieldReader<bool, EVENTS_TXDSENT_A>); impl EVENTS_TXDSENT_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { EVENTS_TXDSENT_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> EVENTS_TXDSENT_A { match self.bits { false => EVENTS_TXDSENT_A::NOTGENERATED, true => EVENTS_TXDSENT_A::GENERATED, } } #[doc = "Checks if the value of the field is `NOTGENERATED`"] #[inline(always)] pub fn is_not_generated(&self) -> bool { **self == EVENTS_TXDSENT_A::NOTGENERATED } #[doc = "Checks if the value of the field is `GENERATED`"] #[inline(always)] pub fn is_generated(&self) -> bool { **self == EVENTS_TXDSENT_A::GENERATED } } impl core::ops::Deref for EVENTS_TXDSENT_R { type Target = crate::FieldReader<bool, EVENTS_TXDSENT_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `EVENTS_TXDSENT` writer - TWI TXD byte sent"] pub struct EVENTS_TXDSENT_W<'a> { w: &'a mut W, } impl<'a> EVENTS_TXDSENT_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: EVENTS_TXDSENT_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Event not generated"] #[inline(always)] pub fn not_generated(self) -> &'a mut W { self.variant(EVENTS_TXDSENT_A::NOTGENERATED) } #[doc = "Event generated"] #[inline(always)] pub fn generated(self) -> &'a mut W { self.variant(EVENTS_TXDSENT_A::GENERATED) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | (value as u32 & 0x01); self.w } } impl R { #[doc = "Bit 0 - TWI TXD byte sent"] #[inline(always)] pub fn events_txdsent(&self) -> EVENTS_TXDSENT_R { EVENTS_TXDSENT_R::new((self.bits & 0x01) != 0) } } impl W { #[doc = "Bit 0 - TWI TXD byte sent"] #[inline(always)] pub fn events_txdsent(&mut self) -> EVENTS_TXDSENT_W { EVENTS_TXDSENT_W { w: self } } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } 
#[doc = "TWI TXD byte sent\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [events_txdsent](index.html) module"] pub struct EVENTS_TXDSENT_SPEC; impl crate::RegisterSpec for EVENTS_TXDSENT_SPEC { type Ux = u32; } #[doc = "`read()` method returns [events_txdsent::R](R) reader structure"] impl crate::Readable for EVENTS_TXDSENT_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [events_txdsent::W](W) writer structure"] impl crate::Writable for EVENTS_TXDSENT_SPEC { type Writer = W; } #[doc = "`reset()` method sets EVENTS_TXDSENT to value 0"] impl crate::Resettable for EVENTS_TXDSENT_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
31.534161
412
0.614733
d92d1bc7c1b9cbfc768aaefe1003631c9fba47ea
2,263
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![deny(clippy::pedantic)] use crate::packet::QuicVersion; use crate::{Error, Res}; use neqo_common::qerror; use neqo_crypto::{aead::Aead, hkdf, TLS_AES_128_GCM_SHA256, TLS_VERSION_1_3}; use std::cell::RefCell; const RETRY_SECRET_27: &[u8] = &[ 0x65, 0x6e, 0x61, 0xe3, 0x36, 0xae, 0x94, 0x17, 0xf7, 0xf0, 0xed, 0xd8, 0xd7, 0x8d, 0x46, 0x1e, 0x2a, 0xa7, 0x08, 0x4a, 0xba, 0x7a, 0x14, 0xc1, 0xe9, 0xf7, 0x26, 0xd5, 0x57, 0x09, 0x16, 0x9a, ]; const RETRY_SECRET_29: &[u8] = &[ 0x8b, 0x0d, 0x37, 0xeb, 0x85, 0x35, 0x02, 0x2e, 0xbc, 0x8d, 0x76, 0xa2, 0x07, 0xd8, 0x0d, 0xf2, 0x26, 0x46, 0xec, 0x06, 0xdc, 0x80, 0x96, 0x42, 0xc3, 0x0a, 0x8b, 0xaa, 0x2b, 0xaa, 0xff, 0x4c, ]; /// The AEAD used for Retry is fixed, so use thread local storage. fn make_aead(secret: &[u8]) -> Aead { #[cfg(debug_assertions)] ::neqo_crypto::assert_initialized(); let secret = hkdf::import_key(TLS_VERSION_1_3, TLS_AES_128_GCM_SHA256, secret).unwrap(); Aead::new(TLS_VERSION_1_3, TLS_AES_128_GCM_SHA256, &secret, "quic ").unwrap() } thread_local!(static RETRY_AEAD_27: RefCell<Aead> = RefCell::new(make_aead(RETRY_SECRET_27))); thread_local!(static RETRY_AEAD_29: RefCell<Aead> = RefCell::new(make_aead(RETRY_SECRET_29))); /// Run a function with the appropriate Retry AEAD. pub fn use_aead<F, T>(quic_version: QuicVersion, f: F) -> Res<T> where F: FnOnce(&Aead) -> Res<T>, { match quic_version { QuicVersion::Draft27 | QuicVersion::Draft28 => &RETRY_AEAD_27, QuicVersion::Draft29 => &RETRY_AEAD_29, } .try_with(|aead| f(&aead.borrow())) .map_err(|e| { qerror!("Unable to access Retry AEAD: {:?}", e); Error::InternalError })? } /// Determine how large the expansion is for a given key. pub fn expansion(quic_version: QuicVersion) -> usize { if let Ok(ex) = use_aead(quic_version, |aead| Ok(aead.expansion())) { ex } else { panic!("Unable to access Retry AEAD") } }
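The two helpers compose as shown below; `expansion` is itself just `use_aead` plus a panic on failure, so callers that want fallibility can go through `use_aead` directly:

// How many extra bytes the Retry integrity tag adds for a given version,
// without panicking if the thread-local AEAD is unavailable.
fn tag_overhead(version: QuicVersion) -> Res<usize> {
    use_aead(version, |aead| Ok(aead.expansion()))
}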
37.098361
99
0.676977
d5a0b6733640f4fa96c8f6ea8e88fe2610c40fe8
2,423
//! The intention of this crate is to allow the use of an `Rc<RefCell<T>>` throughout code where, after enough testing
//! has been done, the `unchecked` feature can be enabled, which will convert `SCell` into an `Rc<UnsafeCell<T>>`.
//!
//! `SCell` provides everything that the combination of `Rc<RefCell<T>>` normally allows, and some more, such as
//! implementations for `PartialOrd` and `Ord`.
//!
//! If you plan to do significant testing in `debug` mode, add the `unchecked` feature to this crate in `release` mode.
//! Otherwise, it might be best to enable optimizations in `debug` so you can test in `debug`, or to create a new
//! profile for testing optimized binaries that still do the runtime checking that `RefCell` provides. Once you have
//! performed your testing, use a compile mode with the `unchecked` feature enabled for this crate and it will remove
//! the overhead from `RefCell`, but not from `Rc`, since it still needs to know when to `drop()` the value.
//!
//! Alternatively, feel free to use this crate for normal use in graphs, meshes, and other recurrent data structures
//! with lots of interconnectivity, where the borrow checker simply can't help. Later, if your code works fine and you
//! need the performance back from `RefCell`, just use the `unchecked` feature and your code will be good to go.

#![feature(coerce_unsized, unsize)]

#[cfg(not(feature = "unchecked"))]
mod checked;
#[cfg(not(feature = "unchecked"))]
pub use checked::*;

#[cfg(feature = "unchecked")]
mod unchecked;
#[cfg(feature = "unchecked")]
pub use unchecked::*;

use std::fmt::{Formatter, Display, Debug, Error};
use std::hash::{Hasher, Hash};
use std::ops::CoerceUnsized;
use std::marker::Unsize;

impl<T, U> CoerceUnsized<SCell<U>> for SCell<T> where T: Unsize<U> + ?Sized, U: ?Sized {}

impl<T: ?Sized> Hash for SCell<T> where T: Hash {
    #[inline]
    fn hash<H>(&self, state: &mut H) where H: Hasher {
        self.borrow().hash(state);
    }
}

impl<T: ?Sized> Display for SCell<T> where T: Display {
    #[inline]
    fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
        self.borrow().fmt(f)
    }
}

impl<T: ?Sized> Debug for SCell<T> where T: Debug {
    #[inline]
    fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
        self.borrow().fmt(f)
    }
}

impl<T> From<T> for SCell<T> {
    #[inline]
    fn from(t: T) -> Self {
        SCell::new(t)
    }
}
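A short sketch of the intended use in an interconnected structure (assuming `SCell::borrow`, `SCell::borrow_mut`, and `Clone` mirror the `Rc<RefCell<T>>` API, as the doc comment and trait impls above imply; the graph here is acyclic, so checked mode will not panic):

struct Node {
    value: u32,
    neighbors: Vec<SCell<Node>>,
}

fn bump(node: &SCell<Node>) {
    node.borrow_mut().value += 1;
    // Clone the handle list first so no borrow is held across the loop;
    // cloning an SCell is cheap because it is reference-counted like Rc.
    let neighbors = node.borrow().neighbors.clone();
    for n in &neighbors {
        n.borrow_mut().value += 1;
    }
}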
34.614286
119
0.66818
e5c9ae4edb47fbedf5a28b5f357eeb4d9b829a4b
8,622
use crate::csc::CscMatrix; use crate::ops::serial::cs::{spadd_cs_prealloc, spmm_cs_dense, spmm_cs_prealloc}; use crate::ops::serial::{OperationError, OperationErrorKind}; use crate::ops::Op; use nalgebra::{ClosedAdd, ClosedMul, DMatrixSlice, DMatrixSliceMut, RealField, Scalar}; use num_traits::{One, Zero}; use std::borrow::Cow; /// Sparse-dense matrix-matrix multiplication `C <- beta * C + alpha * op(A) * op(B)`. /// /// # Panics /// /// Panics if the dimensions of the matrices involved are not compatible with the expression. pub fn spmm_csc_dense<'a, T>( beta: T, c: impl Into<DMatrixSliceMut<'a, T>>, alpha: T, a: Op<&CscMatrix<T>>, b: Op<impl Into<DMatrixSlice<'a, T>>>, ) where T: Scalar + ClosedAdd + ClosedMul + Zero + One, { let b = b.convert(); spmm_csc_dense_(beta, c.into(), alpha, a, b) } fn spmm_csc_dense_<T>( beta: T, c: DMatrixSliceMut<'_, T>, alpha: T, a: Op<&CscMatrix<T>>, b: Op<DMatrixSlice<'_, T>>, ) where T: Scalar + ClosedAdd + ClosedMul + Zero + One, { assert_compatible_spmm_dims!(c, a, b); // Need to interpret matrix as transposed since the spmm_cs_dense function assumes CSR layout let a = a.transposed().map_same_op(|a| &a.cs); spmm_cs_dense(beta, c, alpha, a, b) } /// Sparse matrix addition `C <- beta * C + alpha * op(A)`. /// /// If the pattern of `c` does not accommodate all the non-zero entries in `a`, an error is /// returned. /// /// # Panics /// /// Panics if the dimensions of the matrices involved are not compatible with the expression. pub fn spadd_csc_prealloc<T>( beta: T, c: &mut CscMatrix<T>, alpha: T, a: Op<&CscMatrix<T>>, ) -> Result<(), OperationError> where T: Scalar + ClosedAdd + ClosedMul + Zero + One, { assert_compatible_spadd_dims!(c, a); spadd_cs_prealloc(beta, &mut c.cs, alpha, a.map_same_op(|a| &a.cs)) } /// Sparse-sparse matrix multiplication, `C <- beta * C + alpha * op(A) * op(B)`. /// /// # Errors /// /// If the sparsity pattern of `C` is not able to store the result of the operation, /// an error is returned. /// /// # Panics /// /// Panics if the dimensions of the matrices involved are not compatible with the expression. pub fn spmm_csc_prealloc<T>( beta: T, c: &mut CscMatrix<T>, alpha: T, a: Op<&CscMatrix<T>>, b: Op<&CscMatrix<T>>, ) -> Result<(), OperationError> where T: Scalar + ClosedAdd + ClosedMul + Zero + One, { assert_compatible_spmm_dims!(c, a, b); use Op::{NoOp, Transpose}; match (&a, &b) { (NoOp(ref a), NoOp(ref b)) => { // Note: We have to reverse the order for CSC matrices spmm_cs_prealloc(beta, &mut c.cs, alpha, &b.cs, &a.cs) } _ => { // Currently we handle transposition by explicitly precomputing transposed matrices // and calling the operation again without transposition let a_ref: &CscMatrix<T> = a.inner_ref(); let b_ref: &CscMatrix<T> = b.inner_ref(); let (a, b) = { use Cow::*; match (&a, &b) { (NoOp(_), NoOp(_)) => unreachable!(), (Transpose(ref a), NoOp(_)) => (Owned(a.transpose()), Borrowed(b_ref)), (NoOp(_), Transpose(ref b)) => (Borrowed(a_ref), Owned(b.transpose())), (Transpose(ref a), Transpose(ref b)) => { (Owned(a.transpose()), Owned(b.transpose())) } } }; spmm_csc_prealloc(beta, c, alpha, NoOp(a.as_ref()), NoOp(b.as_ref())) } } } /// Solve the lower triangular system `op(L) X = B`. /// /// Only the lower triangular part of L is read, and the result is stored in B. /// /// # Errors /// /// An error is returned if the system can not be solved due to the matrix being singular. /// /// # Panics /// /// Panics if `L` is not square, or if `L` and `B` are not dimensionally compatible. 
pub fn spsolve_csc_lower_triangular<'a, T: RealField>( l: Op<&CscMatrix<T>>, b: impl Into<DMatrixSliceMut<'a, T>>, ) -> Result<(), OperationError> { let b = b.into(); let l_matrix = l.into_inner(); assert_eq!( l_matrix.nrows(), l_matrix.ncols(), "Matrix must be square for triangular solve." ); assert_eq!( l_matrix.nrows(), b.nrows(), "Dimension mismatch in sparse lower triangular solver." ); match l { Op::NoOp(a) => spsolve_csc_lower_triangular_no_transpose(a, b), Op::Transpose(a) => spsolve_csc_lower_triangular_transpose(a, b), } } fn spsolve_csc_lower_triangular_no_transpose<T: RealField>( l: &CscMatrix<T>, b: DMatrixSliceMut<'_, T>, ) -> Result<(), OperationError> { let mut x = b; // Solve column-by-column for j in 0..x.ncols() { let mut x_col_j = x.column_mut(j); for k in 0..l.ncols() { let l_col_k = l.col(k); // Skip entries above the diagonal // TODO: Can use exponential search here to quickly skip entries // (we'd like to avoid using binary search as it's very cache unfriendly // and the matrix might actually *be* lower triangular, which would induce // a severe penalty) let diag_csc_index = l_col_k.row_indices().iter().position(|&i| i == k); if let Some(diag_csc_index) = diag_csc_index { let l_kk = l_col_k.values()[diag_csc_index].clone(); if l_kk != T::zero() { // Update entry associated with diagonal x_col_j[k] /= l_kk; // Copy value after updating (so we don't run into the borrow checker) let x_kj = x_col_j[k].clone(); let row_indices = &l_col_k.row_indices()[(diag_csc_index + 1)..]; let l_values = &l_col_k.values()[(diag_csc_index + 1)..]; // Note: The remaining entries are below the diagonal for (&i, l_ik) in row_indices.iter().zip(l_values) { let x_ij = &mut x_col_j[i]; *x_ij -= l_ik.clone() * x_kj.clone(); } x_col_j[k] = x_kj; } else { return spsolve_encountered_zero_diagonal(); } } else { return spsolve_encountered_zero_diagonal(); } } } Ok(()) } fn spsolve_encountered_zero_diagonal() -> Result<(), OperationError> { let message = "Matrix contains at least one diagonal entry that is zero."; Err(OperationError::from_kind_and_message( OperationErrorKind::Singular, String::from(message), )) } fn spsolve_csc_lower_triangular_transpose<T: RealField>( l: &CscMatrix<T>, b: DMatrixSliceMut<'_, T>, ) -> Result<(), OperationError> { let mut x = b; // Solve column-by-column for j in 0..x.ncols() { let mut x_col_j = x.column_mut(j); // Due to the transposition, we're essentially solving an upper triangular system, // and the columns in our matrix become rows for i in (0..l.ncols()).rev() { let l_col_i = l.col(i); // Skip entries above the diagonal // TODO: Can use exponential search here to quickly skip entries let diag_csc_index = l_col_i.row_indices().iter().position(|&k| i == k); if let Some(diag_csc_index) = diag_csc_index { let l_ii = l_col_i.values()[diag_csc_index].clone(); if l_ii != T::zero() { // // Update entry associated with diagonal // x_col_j[k] /= a_kk; // Copy value after updating (so we don't run into the borrow checker) let mut x_ii = x_col_j[i].clone(); let row_indices = &l_col_i.row_indices()[(diag_csc_index + 1)..]; let a_values = &l_col_i.values()[(diag_csc_index + 1)..]; // Note: The remaining entries are below the diagonal for (k, l_ki) in row_indices.iter().zip(a_values) { let x_kj = x_col_j[*k].clone(); x_ii -= l_ki.clone() * x_kj; } x_col_j[i] = x_ii / l_ii; } else { return spsolve_encountered_zero_diagonal(); } } else { return spsolve_encountered_zero_diagonal(); } } } Ok(()) }
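A usage sketch for the sparse-sparse entry point above (matrix construction is elided; per the stated contract, the sparsity pattern of `c` must already be able to hold `a * b`, otherwise an `OperationError` is returned):

// C <- 0.0 * C + 1.0 * A * B, with neither operand transposed.
fn overwrite_with_product(
    c: &mut CscMatrix<f64>,
    a: &CscMatrix<f64>,
    b: &CscMatrix<f64>,
) -> Result<(), OperationError> {
    spmm_csc_prealloc(0.0, c, 1.0, Op::NoOp(a), Op::NoOp(b))
}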
33.679688
97
0.561239
627bc1f9463c1de5c385ff3a284079a72daf912d
1,135
use crate::{ dds::{ddsdata::DDSData, with_key::datawriter::WriteOptions}, structure::{guid::GUID, sequence_number::SequenceNumber}, }; #[derive(Debug, PartialOrd, PartialEq, Ord, Eq, Copy, Clone)] pub enum ChangeKind { Alive, NotAliveDisposed, NotAliveUnregistered, } #[derive(Debug, Clone)] pub struct CacheChange { pub writer_guid: GUID, pub sequence_number: SequenceNumber, pub write_options: WriteOptions, pub data_value: DDSData, } #[cfg(test)] impl PartialEq for CacheChange { fn eq(&self, other: &Self) -> bool { self.writer_guid == other.writer_guid && self.sequence_number == other.sequence_number && self.write_options == other.write_options && self.data_value == other.data_value } } impl CacheChange { pub fn new( writer_guid: GUID, sequence_number: SequenceNumber, write_options: WriteOptions, data_value: DDSData, ) -> CacheChange { CacheChange { writer_guid, sequence_number, write_options, data_value, } } // Not needed? // pub fn change_kind(&self) -> ChangeKind { // self.data_value.change_kind() // } }
22.254902
62
0.678414
bbb9e949bfa52db9d81dbcb8ac68fef94c893d10
9,567
// MIT/Apache2 License use super::EstablishConnectionFuture; use crate::{ auth_info::AuthInfo, display::{ReadPacketFuture, SendPacketFuture}, }; use core::task::{Context, Poll}; #[cfg(all(feature = "std", unix))] use super::unix; use crate::Fd; use alloc::vec::Vec; #[cfg(not(unix))] use core::pin::Pin; #[cfg(all(feature = "std", not(unix)))] use futures_lite::{AsyncRead, AsyncWrite}; #[cfg(all(feature = "std", not(unix)))] use std::io; #[cfg(feature = "std")] use async_io::Async; #[cfg(feature = "std")] use std::net::TcpStream; #[cfg(all(feature = "std", unix))] use std::os::unix::net::UnixStream; #[cfg(not(unix))] use super::standard_fd_warning; #[cfg(all(not(unix), feature = "std"))] use futures_lite::io::{AsyncReadExt, AsyncWriteExt}; #[cfg(all(not(unix), feature = "tokio-support"))] use tokio_util::compat::{TokioAsyncReadCompatExt as _, TokioAsyncWriteCompatExt as _}; /// Asynchronous breadx connection. pub trait AsyncConnection { /// Send a packet across the connection in an async manner. fn poll_send_packet( &mut self, bytes: &[u8], fds: &mut Vec<Fd>, cx: &mut Context<'_>, bytes_written: &mut usize, ) -> Poll<crate::Result>; /// Read a packet from the connection in an async manner. fn poll_read_packet( &mut self, bytes: &mut [u8], fds: &mut Vec<Fd>, cx: &mut Context<'_>, bytes_read: &mut usize, ) -> Poll<crate::Result>; /// Establish a connection to the server. #[inline] fn establish_async( &mut self, auth_info: Option<AuthInfo>, ) -> EstablishConnectionFuture<'_, Self> { EstablishConnectionFuture::run(self, auth_info) } } impl<C: AsyncConnection + ?Sized> AsyncConnection for &mut C { #[inline] fn poll_send_packet( &mut self, bytes: &[u8], fds: &mut Vec<Fd>, cx: &mut Context<'_>, bytes_written: &mut usize, ) -> Poll<crate::Result> { (**self).poll_send_packet(bytes, fds, cx, bytes_written) } #[inline] fn poll_read_packet( &mut self, bytes: &mut [u8], fds: &mut Vec<Fd>, cx: &mut Context<'_>, bytes_read: &mut usize, ) -> Poll<crate::Result> { (**self).poll_read_packet(bytes, fds, cx, bytes_read) } } /// Extension trait for `AsyncConnection` that provides futures. pub trait AsyncConnectionExt { fn read_packet_async<'a, 'b, 'c>( &'a mut self, bytes: &'b mut [u8], fds: &'c mut Vec<Fd>, ) -> ReadPacketFuture<'a, 'b, 'c, Self>; fn send_packet_async<'a, 'b, 'c>( &'a mut self, bytes: &'b [u8], fds: &'c mut Vec<Fd>, ) -> SendPacketFuture<'a, 'b, 'c, Self>; } impl<C: AsyncConnection + ?Sized> AsyncConnectionExt for C { #[inline] fn read_packet_async<'a, 'b, 'c>( &'a mut self, bytes: &'b mut [u8], fds: &'c mut Vec<Fd>, ) -> ReadPacketFuture<'a, 'b, 'c, Self> { ReadPacketFuture::run(self, bytes, fds) } #[inline] fn send_packet_async<'a, 'b, 'c>( &'a mut self, bytes: &'b [u8], fds: &'c mut Vec<Fd>, ) -> SendPacketFuture<'a, 'b, 'c, Self> { SendPacketFuture::run(self, bytes, fds) } } macro_rules! unix_aware_async_connection_impl { ($name: ty) => { impl AsyncConnection for $name { #[inline] fn poll_send_packet( &mut self, bytes: &[u8], fds: &mut Vec<Fd>, cx: &mut Context<'_>, bytes_written: &mut usize, ) -> Poll<crate::Result> { cfg_if::cfg_if! 
{ if #[cfg(unix)] { unix::poll_send_packet_unix(self, bytes, fds, cx, bytes_written) } else { standard_fd_warning(fds); let mut bytes = bytes; while !bytes.is_empty() { match Pin::new(&mut *self).poll_write(cx, bytes) { Poll::Pending => return Poll::Pending, Poll::Ready(Err(e)) => return Poll::Ready(Err(e.into())), Poll::Ready(Ok(0)) => { let err: io::Error = io::ErrorKind::WriteZero.into(); return Poll::Ready(Err(err.into())); } Poll::Ready(Ok(n)) => { bytes = &bytes[n..]; *bytes_written += n; } } } Poll::Ready(Ok(())) } } } #[inline] fn poll_read_packet( &mut self, bytes: &mut [u8], fds: &mut Vec<Fd>, cx: &mut Context<'_>, bytes_read: &mut usize, ) -> Poll<crate::Result> { cfg_if::cfg_if! { if #[cfg(unix)] { unix::poll_read_packet_unix(self, bytes, fds, cx, bytes_read) } else { let _ = fds; let mut bytes = bytes; while !bytes.is_empty() { match Pin::new(&mut *self).poll_read(cx, bytes) { Poll::Pending => return Poll::Pending, Poll::Ready(Err(e)) => return Poll::Ready(Err(e.into())), Poll::Ready(Ok(0)) => { let err: io::Error = io::ErrorKind::UnexpectedEof.into(); return Poll::Ready(Err(err.into())); } Poll::Ready(Ok(n)) => { bytes = &mut bytes[n..]; *bytes_read += n; } } } Poll::Ready(Ok(())) } } } } }; } // NOTE: In the past, these were "async_net::TcpStream" and "async_net::os::unix::UnixStream". // However, neither implement AsyncRead or AsyncWrite for immutable access. The underlying // "Async" primitive, however, does. #[cfg(feature = "std")] unix_aware_async_connection_impl! { Async<TcpStream> } #[cfg(all(feature = "std", unix))] unix_aware_async_connection_impl! { Async<UnixStream> } #[cfg(feature = "std")] unix_aware_async_connection_impl! { &Async<TcpStream> } #[cfg(all(feature = "std", unix))] unix_aware_async_connection_impl! { &Async<UnixStream> } #[cfg(all(feature = "tokio-support", unix))] unix_aware_async_connection_impl! { tokio::net::UnixStream } #[cfg(feature = "tokio-support")] impl AsyncConnection for tokio::net::TcpStream { #[inline] fn poll_send_packet( &mut self, bytes: &[u8], fds: &mut Vec<Fd>, cx: &mut Context<'_>, bytes_written: &mut usize, ) -> Poll<crate::Result> { cfg_if::cfg_if! { if #[cfg(unix)] { unix::poll_send_packet_unix(self, bytes, fds, cx, bytes_written) } else { standard_fd_warning(fds); let mut bytes = bytes; let mut this = self.compat(); while !bytes.is_empty() { match Pin::new(&mut this).poll_write(cx, bytes) { Poll::Pending => return Poll::Pending, Poll::Ready(Err(e)) => return Poll::Ready(Err(e.into())), Poll::Ready(Ok(0)) => { let err: io::Error = io::ErrorKind::WriteZero.into(); return Poll::Ready(Err(err.into())); } Poll::Ready(Ok(n)) => { bytes = &bytes[n..]; *bytes_written += n; } } } Poll::Ready(Ok(())) } } } #[inline] fn poll_read_packet( &mut self, bytes: &mut [u8], fds: &mut Vec<Fd>, cx: &mut Context<'_>, bytes_read: &mut usize, ) -> Poll<crate::Result> { cfg_if::cfg_if! { if #[cfg(unix)] { unix::poll_read_packet_unix(self, bytes, fds, cx, bytes_read) } else { let _ = fds; let mut bytes = bytes; let mut this = self.compat(); while !bytes.is_empty() { match Pin::new(&mut this).poll_read(cx, bytes) { Poll::Pending => return Poll::Pending, Poll::Ready(Err(e)) => return Poll::Ready(Err(e.into())), Poll::Ready(Ok(0)) => { let err: io::Error = io::ErrorKind::UnexpectedEof.into(); return Poll::Ready(Err(err.into())); } Poll::Ready(Ok(n)) => { bytes = &mut bytes[n..]; *bytes_read += n; } } } Poll::Ready(Ok(())) } } } }
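An end-to-end sketch of the async path over TCP (the address and error plumbing here are illustrative; `Async::<TcpStream>::connect` comes from `async-io`, and the handshake uses the `establish_async` default method above):

async fn connect_to_display() -> Result<(), Box<dyn std::error::Error>> {
    // X11 display :0 over TCP conventionally listens on port 6000.
    let mut conn = Async::<TcpStream>::connect(([127, 0, 0, 1], 6000)).await?;
    // Perform the X11 setup handshake without authentication info.
    conn.establish_async(None).await?;
    Ok(())
}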
33.334495
96
0.456465
f5505eede26f05bde83adcd783758f9c21e2e8fd
649
use std::{self, io, num, str}; pub type Result<T> = std::result::Result<T, Error>; #[derive(Debug)] pub enum Error { Str(&'static str), Io(io::Error), Utf8(str::Utf8Error), Int(num::ParseIntError), } impl From<&'static str> for Error { fn from(e: &'static str) -> Self { Error::Str(e) } } impl From<io::Error> for Error { fn from(e: io::Error) -> Self { Error::Io(e) } } impl From<str::Utf8Error> for Error { fn from(e: str::Utf8Error) -> Self { Error::Utf8(e) } } impl From<num::ParseIntError> for Error { fn from(e: num::ParseIntError) -> Self { Error::Int(e) } }
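These `From` impls are what let `?` convert each failure into the crate's `Error` without explicit `map_err`; for example:

fn parse_port(raw: &[u8]) -> Result<u16> {
    let text = str::from_utf8(raw)?; // str::Utf8Error -> Error::Utf8
    let port = text.trim().parse()?; // num::ParseIntError -> Error::Int
    Ok(port)
}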
19.666667
51
0.560863
64db08a65d266906e8f61c3d0c39a1adfc590eef
1,512
// TODO: Remove this attribute once this type is used (#2603). #![allow(dead_code)] use tokio::sync::watch; use super::RecentSyncLengths; #[cfg(test)] mod tests; /// A helper type to determine if the synchronizer has likely reached the chain tip. /// /// This type can be used as a handle, so cloning it is cheap. #[derive(Clone, Debug)] pub struct SyncStatus { latest_sync_length: watch::Receiver<Vec<usize>>, } impl SyncStatus { /// Create an instance of [`SyncStatus`]. /// /// The status is determined based on the latest counts of synchronized blocks, observed /// through `latest_sync_length`. pub fn new() -> (Self, RecentSyncLengths) { let (recent_sync_lengths, latest_sync_length) = RecentSyncLengths::new(); let status = SyncStatus { latest_sync_length }; (status, recent_sync_lengths) } /// Wait until the synchronization is likely close to the tip. /// /// Returns an error if communication with the synchronizer is lost. pub async fn wait_until_close_to_tip(&mut self) -> Result<(), watch::error::RecvError> { while !self.is_close_to_tip() { self.latest_sync_length.changed().await?; } Ok(()) } /// Check if the synchronization is likely close to the chain tip. pub fn is_close_to_tip(&self) -> bool { let _sync_lengths = self.latest_sync_length.borrow(); // TODO: Determine if the synchronization is actually close to the tip (#2592). true } }
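A sketch of the intended call site: one task holds a cloned `SyncStatus` handle and blocks until the tip is near, while the syncer feeds the paired `RecentSyncLengths` elsewhere:

async fn start_tip_dependent_work(mut status: SyncStatus) {
    // Returns Err only if the RecentSyncLengths sender was dropped.
    if status.wait_until_close_to_tip().await.is_ok() {
        // ... safe to assume we are near the chain tip here ...
    }
}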
30.24
92
0.66336
64e14cd30373b8ffd7f14463e7cc8deec6deeefd
633
pub fn get_first_char(text: &str) -> char { text.chars().next().unwrap_or_default() } pub fn char_to_usize(text: char) -> usize { text as usize } pub fn text_regex() { use regex::Regex; let re = Regex::new(r"^\d{4}-\d{2}-\d{2}$").unwrap(); assert!(re.is_match("2014-01-01")); } pub fn hex_to_char(s: &str) -> Result<char, std::num::ParseIntError> { u8::from_str_radix(s, 16).map(|n| n as char) } #[cfg(test)] mod tests { use super::*; #[test] fn test_hex_to_char() { let v = vec!["00", "4b", "4c"]; for s in v { println!("{:?}", hex_to_char(s)); } } }
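Note that `hex_to_char` casts through `u8`, so it only covers two-digit hex values (0x00 to 0xFF, read as Latin-1 scalars). A small example in the style of the tests above:

#[test]
fn hex_roundtrip() {
    assert_eq!(hex_to_char("4b").unwrap(), 'K'); // 0x4B == 75 == 'K'
    assert_eq!(char_to_usize(get_first_char("K")), 0x4b);
    assert!(hex_to_char("zz").is_err()); // not valid hex
}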
20.419355
70
0.541864
50ad358dc8accf4aa9fcc9f47ef1d8aa8a0b92a4
426
use serde::{Deserialize, Serialize}; use std::fmt::Debug; use wfbp_discord::{middleware::ClientSecret, models::Snowflake}; #[derive(Clone, Debug, Serialize, Deserialize)] pub struct Config { pub app_id: Snowflake, pub client_id: Snowflake, pub client_secret: ClientSecret, #[serde(rename = "functions_customhandler_port", default = "default_port")] pub port: u16, } fn default_port() -> u16 { 3000 }
25.058824
79
0.706573
0e0ab92ea97a6317e33c984aad43a1e1a4a18a5f
6,573
// Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0

use crate::{config::global::Config as GlobalConfig, errors::*};
use std::str::FromStr;
use transaction_builder::transaction_argument::parse_as_transaction_argument;
use types::transaction::TransactionArgument;

/// A partially parsed transaction argument.
#[derive(Debug)]
pub enum Argument {
    AddressOf(String),
    SelfContained(TransactionArgument),
}

impl FromStr for Argument {
    type Err = Error;

    fn from_str(s: &str) -> Result<Self> {
        if let Ok(arg) = parse_as_transaction_argument(s) {
            return Ok(Argument::SelfContained(arg));
        }
        if s.starts_with("{{") && s.ends_with("}}") {
            return Ok(Argument::AddressOf(s[2..s.len() - 2].to_string()));
        }
        Err(ErrorKind::Other(format!("failed to parse '{}' as argument", s)).into())
    }
}

/// A raw entry extracted from the input. Used to build a transaction config table.
#[derive(Debug)]
pub enum Entry {
    NoVerify,
    NoExecute,
    Sender(String),
    Arguments(Vec<Argument>),
}

impl FromStr for Entry {
    type Err = Error;

    fn from_str(s: &str) -> Result<Self> {
        let s = s.trim_start().trim_end();
        if !s.starts_with("//!") {
            return Err(
                ErrorKind::Other("txn config entry must start with //!".to_string()).into(),
            );
        }
        let s = s[3..].trim_start();
        match s {
            "no-verify" => {
                return Ok(Entry::NoVerify);
            }
            "no-execute" => {
                return Ok(Entry::NoExecute);
            }
            _ => {}
        }
        if s.starts_with("sender:") {
            let s = s[7..].trim_start().trim_end();
            if s.is_empty() {
                return Err(ErrorKind::Other("sender cannot be empty".to_string()).into());
            }
            return Ok(Entry::Sender(s.to_ascii_lowercase()));
        }
        if s.starts_with("args:") {
            let res: Result<Vec<_>> = s[5..]
                .split(',')
                .map(|s| s.trim_start().trim_end())
                .filter(|s| !s.is_empty())
                .map(|s| s.parse::<Argument>())
                .collect();
            return Ok(Entry::Arguments(res?));
        }
        Err(ErrorKind::Other(format!(
            "failed to parse '{}' as transaction config entry",
            s
        ))
        .into())
    }
}

/// Checks whether a line denotes the start of a new transaction.
pub fn is_new_transaction(s: &str) -> bool {
    let s = s.trim();
    if !s.starts_with("//!") {
        return false;
    }
    s[3..].trim_start() == "new-transaction"
}

impl Entry {
    pub fn try_parse(s: &str) -> Result<Option<Self>> {
        if s.starts_with("//!") {
            Ok(Some(s.parse::<Entry>()?))
        } else {
            Ok(None)
        }
    }
}

/// A table of options specific to one transaction, fine tweaking how the transaction
/// is handled by the testing infra.
#[derive(Debug)]
pub struct Config {
    pub no_verify: bool,
    pub no_execute: bool,
    pub sender: String,
    pub args: Vec<TransactionArgument>,
}

impl Config {
    /// Builds a transaction config table from raw entries.
    pub fn build(config: &GlobalConfig, entries: &[Entry]) -> Result<Self> {
        let mut no_verify = None;
        let mut no_execute = None;
        let mut sender = None;
        let mut args = None;

        for entry in entries {
            match entry {
                Entry::NoVerify => match no_verify {
                    None => {
                        no_verify = Some(true);
                    }
                    _ => {
                        return Err(
                            ErrorKind::Other("flag 'no-verify' already set".to_string()).into()
                        );
                    }
                },
                Entry::NoExecute => match no_execute {
                    None => {
                        no_execute = Some(true);
                    }
                    _ => {
                        return Err(
                            ErrorKind::Other("flag 'no-execute' already set".to_string()).into(),
                        );
                    }
                },
                Entry::Sender(name) => match sender {
                    None => {
                        if config.accounts.contains_key(name) {
                            sender = Some(name.to_string())
                        } else {
                            return Err(ErrorKind::Other(format!(
                                "account '{}' does not exist",
                                name
                            ))
                            .into());
                        }
                    }
                    _ => return Err(ErrorKind::Other("sender already set".to_string()).into()),
                },
                Entry::Arguments(raw_args) => match args {
                    None => {
                        args = Some(
                            raw_args
                                .iter()
                                .map(|arg| match arg {
                                    Argument::AddressOf(name) => match config.accounts.get(name) {
                                        Some(data) => {
                                            Ok(TransactionArgument::Address(*data.address()))
                                        }
                                        None => Err(ErrorKind::Other(format!(
                                            "account '{}' does not exist",
                                            name
                                        ))
                                        .into()),
                                    },
                                    Argument::SelfContained(arg) => Ok(arg.clone()),
                                })
                                .collect::<Result<Vec<_>>>()?,
                        );
                    }
                    _ => {
                        return Err(ErrorKind::Other(
                            "transaction arguments already set".to_string(),
                        )
                        .into())
                    }
                },
            }
        }

        Ok(Config {
            no_verify: no_verify.unwrap_or(false),
            no_execute: no_execute.unwrap_or(false),
            sender: sender.unwrap_or_else(|| "default".to_string()),
            args: args.unwrap_or_else(|| vec![]),
        })
    }
}
33.707692
98
0.428724
8a013aa376530e02c62e9504e08670181583b0cf
238
use crate::{IncomingConnection, Packet, SendReceipt};

#[derive(Debug, PartialEq)]
pub enum PeerEvent {
    Packet(Packet),
    SendReceiptAcked(SendReceipt),
    SendReceiptLoss(SendReceipt),
    IncomingConnection(IncomingConnection),
}
26.444444
53
0.756303
eda130ea1ef8f898e67c9a8d50295bfef760e5ae
3,726
//! On-board user LEDs

use core::ops;

use hal::prelude::*;

use hal::gpio::gpioe::{self, PEx, PE10, PE11, PE12, PE13, PE14, PE15, PE8, PE9};
use hal::gpio::{Output, PushPull};

/// North LED
pub type LD3 = PE9<Output<PushPull>>;
/// Northeast LED
pub type LD5 = PE10<Output<PushPull>>;
/// East LED
pub type LD7 = PE11<Output<PushPull>>;
/// Southeast LED
pub type LD9 = PE12<Output<PushPull>>;
/// South LED
pub type LD10 = PE13<Output<PushPull>>;
/// Southwest LED
pub type LD8 = PE14<Output<PushPull>>;
/// West LED
pub type LD6 = PE15<Output<PushPull>>;
/// Northwest LED
pub type LD4 = PE8<Output<PushPull>>;

/// Cardinal directions. Each one matches one of the user LEDs.
pub enum Direction {
    /// North / LD3
    North,
    /// Northeast / LD5
    Northeast,
    /// East / LD7
    East,
    /// Southeast / LD9
    Southeast,
    /// South / LD10
    South,
    /// Southwest / LD8
    Southwest,
    /// West / LD6
    West,
    /// Northwest / LD4
    Northwest,
}

/// Array of all the user LEDs on the board
pub struct Leds {
    leds: [Led; 8],
}

impl Leds {
    /// Initializes all the user LEDs
    pub fn new(mut gpioe: gpioe::Parts) -> Self {
        let n = gpioe
            .pe9
            .into_push_pull_output(&mut gpioe.moder, &mut gpioe.otyper);
        let ne = gpioe
            .pe10
            .into_push_pull_output(&mut gpioe.moder, &mut gpioe.otyper);
        let e = gpioe
            .pe11
            .into_push_pull_output(&mut gpioe.moder, &mut gpioe.otyper);
        let se = gpioe
            .pe12
            .into_push_pull_output(&mut gpioe.moder, &mut gpioe.otyper);
        let s = gpioe
            .pe13
            .into_push_pull_output(&mut gpioe.moder, &mut gpioe.otyper);
        let sw = gpioe
            .pe14
            .into_push_pull_output(&mut gpioe.moder, &mut gpioe.otyper);
        let w = gpioe
            .pe15
            .into_push_pull_output(&mut gpioe.moder, &mut gpioe.otyper);
        let nw = gpioe
            .pe8
            .into_push_pull_output(&mut gpioe.moder, &mut gpioe.otyper);

        Leds {
            leds: [
                n.into(),
                ne.into(),
                e.into(),
                se.into(),
                s.into(),
                sw.into(),
                w.into(),
                nw.into(),
            ],
        }
    }
}

impl ops::Deref for Leds {
    type Target = [Led];

    fn deref(&self) -> &[Led] {
        &self.leds
    }
}

impl ops::DerefMut for Leds {
    fn deref_mut(&mut self) -> &mut [Led] {
        &mut self.leds
    }
}

impl ops::Index<usize> for Leds {
    type Output = Led;

    fn index(&self, i: usize) -> &Led {
        &self.leds[i]
    }
}

impl ops::Index<Direction> for Leds {
    type Output = Led;

    fn index(&self, d: Direction) -> &Led {
        &self.leds[d as usize]
    }
}

impl ops::IndexMut<usize> for Leds {
    fn index_mut(&mut self, i: usize) -> &mut Led {
        &mut self.leds[i]
    }
}

impl ops::IndexMut<Direction> for Leds {
    fn index_mut(&mut self, d: Direction) -> &mut Led {
        &mut self.leds[d as usize]
    }
}

/// One of the on-board user LEDs
pub struct Led {
    pex: PEx<Output<PushPull>>,
}

macro_rules! ctor {
    ($($ldx:ident),+) => {
        $(
            impl Into<Led> for $ldx {
                fn into(self) -> Led {
                    Led {
                        pex: self.downgrade(),
                    }
                }
            }
        )+
    }
}

ctor!(LD3, LD4, LD5, LD6, LD7, LD8, LD9, LD10);

impl Led {
    /// Turns the LED off
    pub fn off(&mut self) {
        self.pex.set_low()
    }

    /// Turns the LED on
    pub fn on(&mut self) {
        self.pex.set_high()
    }
}
21.170455
80
0.517445