use smallvec::SmallVec;
use std::{i8, i16, i32, i64, i128, isize, u8, u16, u32, u64, u128, usize, str};
use std::any::type_name;
use std::cell::Ref;
use std::cmp::Ordering;
use std::collections::{BTreeMap, HashMap, VecDeque};
use std::convert::TryInto;
use std::hash::{BuildHasher, Hash};
use std::io::Write;
use std::iter::{Extend, FromIterator};
use std::ffi::{CString, CStr, OsString, OsStr};
use std::path::{Path, PathBuf};
use super::code::{Coro, GFn};
use super::collections::{Arr, DequeAccess, DequeOps, Deque, Str, Tab};
use super::class::{Class, Obj};
use super::engine::{glsp, RData, RFn, RRoot, RStore, stock_syms::*, Sym};
use super::error::GResult;
use super::eval::{EnvMode, Expander};
use super::gc::{Gc, Root, Slot};
use super::iter::{GIter, Iterable, GIterLen};
use super::val::{Num, Val};

/*
rfn!() takes an arbitrary function or non-capturing closure whose arguments all implement
MakeArg and whose return value implements IntoResult. it returns a minimum and maximum arg
count, and a monomorphized fn ptr, `fn(&[Slot]) -> GResult<Slot>`

MakeArg is implemented for a long list of types: anything that implements FromVal, and also
references to libraries, references to RData, references to string types and primitive types
like Arr, Option<T> for optional args, &[T] for rest arguments, and OrNil<T>.

could potentially support Option<T>, &[T] and &mut [T] in tuples?

if we find a way for pattern-matches to "cheaply fail" (perhaps returning a new result type
which encodes the reason for the conversion failure as plain old data, rather than
stringifying it straight away?), then we could support the `either` crate. this would make
OrNil unnecessary; replace it with Either<(), T>.
*/

/*
notes on the implementation:

the ToVal and FromVal traits are user-facing. if the user has or wants a Val, they can invoke
these traits directly with say i32::from_val(val) or my_i32.to_val(). the user can implement
these traits for their own types.

both of those traits have a "secret" method: to_slot or from_slot respectively. they are
doc(hidden), with a default implementation that works for any valid to_val or from_val
implementation. the "secret" methods are invoked whenever rust data is passed into or out of
glsp's internals (rfn return values, and the arguments and return values to various methods
on Arr, Tab, Obj, Class and glsp::). we implement the "secret" methods for types which want
to avoid creating a temporary root, like Val or Root<Arr>.

we want arguments to methods like tab.get(key) to perform autoderef: that is, they should
accept i32, &i32, &&&i32, &mut i32, etc. we achieve that by blanket-implementing ToVal for
T where T: Deref, T::Target: ToVal. note that this prevents the user from directly
implementing ToVal for anything that implements Deref.

the MakeArg trait, and its parent MakeTemp, are used for rfn arguments. they have a blanket
implementation over T: FromVal. they also provide implementations for certain "special"
types like Option<T>, &Arr, and &[T]. they're implemented individually for references to
any T that's been passed to the lib! {} or rdata! {} macros.

the IntoResult trait is used for rfn results. it has a blanket implementation over T: ToVal,
and it's also implemented for GResult<T>.
*/
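/*
an illustrative sketch (not itself part of the crate): under the scheme described above,
an rfn like

    fn example(a: i32, b: Option<f32>, rest: &[i32]) -> i32 { ... }
    glsp::bind_rfn("example", rfn!(example))?;

would be monomorphized into a `fn(&[Slot]) -> GResult<Slot>` with arg limits (1, None):
one required argument, one optional argument, and a trailing rest parameter which accepts
any number of additional arguments.
*/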
//-------------------------------------------------------------------------------------------------
// ToVal, FromVal
//-------------------------------------------------------------------------------------------------

/**
A type which can be converted to a GameLisp value.

Many functions in the `glsp` crate receive a generic parameter `T: ToVal`. This enables those
functions to accept many different Rust types, which will be silently converted to a
[`Val`](enum.Val.html).

    glsp::set_global("numbers", (0, 1, 2, 3, 4))?;
    arr.push("text")?;

Implementing the `ToVal` trait for your own types will enable them to take advantage of
automatic conversions for `RFn` return values.

    struct Rgb(u8, u8, u8);

    impl ToVal for Rgb {
        fn to_val(&self) -> GResult<Val> {
            let Rgb(r, g, b) = self;
            arr![r, g, b].to_val()
        }
    }

    fn light_sea_green() -> Rgb {
        Rgb(32, 178, 170)
    }

    glsp::bind_rfn("light-sea-green", rfn!(light_sea_green))?;

Invoking a type's [`to_val` method](#method.to_val) is usually the most convenient way to
produce a `Val`. `ToVal` is part of the [prelude](prelude/index.html), so there's no need
to import it into scope.

    let thousand = 10.0_f64.powf(3.0).to_val()?;
*/
//we go for by-reference &self, &Val and &Slot, mostly so that we can blanket-implement
//ToVal for T where T: Deref, T::Target: ToVal, and so that we don't have to copy Slots
//out of the &[Slot] argument slice when constructing rfn parameters, or e.g. out of the
//VecDeque<Slot> when accessing an Arr.
pub trait ToVal {
    fn to_val(&self) -> GResult<Val>;

    #[doc(hidden)]
    fn to_slot(&self) -> GResult<Slot> {
        self.to_val()?.to_slot()
    }
}
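//as a consequence of the reference impls further below, arguments can be passed with any
//level of indirection; for example, arr.push(&&5_i32) behaves like arr.push(5_i32).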
/**
A type which can be converted from a GameLisp value.

Many functions in the `glsp` crate have a generic return value `R: FromVal`. They can
automatically convert their return value to many different Rust types.

    let numbers: Vec<u8> = glsp::global("numbers")?;
    let text: Root<Str> = arr.pop()?;

Implementing the `FromVal` trait for your own types will also enable them to take advantage
of automatic conversions for `RFn` arguments.

    struct Rgb(u8, u8, u8);

    impl FromVal for Rgb {
        fn from_val(val: &Val) -> GResult<Rgb> {
            let (r, g, b) = <(u8, u8, u8)>::from_val(val)?;
            Ok(Rgb(r, g, b))
        }
    }

    fn invert(src: Rgb) -> Rgb {
        let Rgb(r, g, b) = src;
        Rgb(255 - r, 255 - g, 255 - b)
    }

    glsp::bind_rfn("invert", rfn!(invert))?;

Writing `T::from_val(v)?` is usually the most convenient way to destructure a `Val`.
`FromVal` is part of the [prelude](prelude/index.html), so there's no need to import it
into scope.

    let f = f64::from_val(val)?;
*/
pub trait FromVal: Sized {
    //todo: should this be from_val<V: Borrow<Val>>? would be slightly more convenient when
    //the user is invoking it directly: they could pass either a Val or a &Val.
    fn from_val(val: &Val) -> GResult<Self>;

    #[doc(hidden)]
    fn from_slot(val: &Slot) -> GResult<Self> {
        Self::from_val(&val.root())
    }
}

impl<'a, T> ToVal for &'a T where T: ToVal + ?Sized {
    fn to_val(&self) -> GResult<Val> { (**self).to_val() }
    fn to_slot(&self) -> GResult<Slot> { (**self).to_slot() }
}

impl<'a, T> ToVal for &'a mut T where T: ToVal + ?Sized {
    fn to_val(&self) -> GResult<Val> { (**self).to_val() }
    fn to_slot(&self) -> GResult<Slot> { (**self).to_slot() }
}

//-------------------------------------------------------------------------------------------------
// ToVal implementations
//-------------------------------------------------------------------------------------------------

impl ToVal for Val {
    #[inline(always)]
    fn to_val(&self) -> GResult<Val> { Ok((*self).clone()) }

    #[inline(always)]
    fn to_slot(&self) -> GResult<Slot> { Ok(Slot::from_val(self)) }
}

impl ToVal for Slot {
    #[inline(always)]
    fn to_val(&self) -> GResult<Val> { Ok((*self).root()) }

    #[inline(always)]
    fn to_slot(&self) -> GResult<Slot> { Ok((*self).clone()) }
}

impl<T> ToVal for Option<T> where T: ToVal {
    fn to_val(&self) -> GResult<Val> {
        match self {
            Some(src) => src.to_val(),
            None => Ok(Val::Nil)
        }
    }

    fn to_slot(&self) -> GResult<Slot> {
        match self {
            Some(src) => src.to_slot(),
            None => Ok(Slot::Nil)
        }
    }
}

impl ToVal for () {
    #[inline(always)]
    fn to_val(&self) -> GResult<Val> { Ok(Val::Nil) }

    #[inline(always)]
    fn to_slot(&self) -> GResult<Slot> { Ok(Slot::Nil) }
}

macro_rules! impl_to_val_infallible {
    ($self_type:ty, $variant:ident) => (
        impl ToVal for $self_type {
            #[inline(always)]
            fn to_val(&self) -> GResult<Val> {
                Ok(Val::$variant((*self).into()))
            }

            #[inline(always)]
            fn to_slot(&self) -> GResult<Slot> {
                Ok(Slot::$variant((*self).into()))
            }
        }
    );
}

impl_to_val_infallible!(i8, Int);
impl_to_val_infallible!(i16, Int);
impl_to_val_infallible!(i32, Int);
impl_to_val_infallible!(u8, Int);
impl_to_val_infallible!(u16, Int);
impl_to_val_infallible!(f32, Flo);
impl_to_val_infallible!(char, Char);
impl_to_val_infallible!(bool, Bool);
impl_to_val_infallible!(Sym, Sym);
impl_to_val_infallible!(RFn, RFn);
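//for example, all of these conversions are infallible:
//
//  assert!(matches!(42_u16.to_val()?, Val::Int(42)));
//  assert!(matches!(true.to_val()?, Val::Bool(true)));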
macro_rules! impl_to_val_root {
    ($t:ty, $variant:ident) => (
        impl ToVal for Root<$t> {
            #[inline(always)]
            fn to_val(&self) -> GResult<Val> {
                Ok(Val::$variant(self.clone()))
            }

            #[inline(always)]
            fn to_slot(&self) -> GResult<Slot> {
                Ok(Slot::$variant(Gc::from_root(self)))
            }
        }

        impl ToVal for Gc<$t> {
            #[inline(always)]
            fn to_val(&self) -> GResult<Val> {
                Ok(Val::$variant(self.root()))
            }

            #[inline(always)]
            fn to_slot(&self) -> GResult<Slot> {
                Ok(Slot::$variant(self.clone()))
            }
        }
    );
}

impl_to_val_root!(Arr, Arr);
impl_to_val_root!(Str, Str);
impl_to_val_root!(Tab, Tab);
impl_to_val_root!(GIter, GIter);
impl_to_val_root!(Obj, Obj);
impl_to_val_root!(Class, Class);
impl_to_val_root!(GFn, GFn);
impl_to_val_root!(Coro, Coro);
impl_to_val_root!(RData, RData);

impl<T: RStore> ToVal for RRoot<T> {
    #[inline(always)]
    fn to_val(&self) -> GResult<Val> { Ok(Val::RData(self.to_root())) }

    #[inline(always)]
    fn to_slot(&self) -> GResult<Slot> { Ok(Slot::RData(self.to_gc())) }
}

impl ToVal for Deque {
    #[inline(always)]
    fn to_val(&self) -> GResult<Val> {
        match *self {
            Deque::Arr(ref root) => Ok(Val::Arr(root.clone())),
            Deque::Str(ref root) => Ok(Val::Str(root.clone()))
        }
    }

    #[inline(always)]
    fn to_slot(&self) -> GResult<Slot> {
        match *self {
            Deque::Arr(ref root) => Ok(Slot::Arr(root.to_gc())),
            Deque::Str(ref root) => Ok(Slot::Str(root.to_gc()))
        }
    }
}

impl ToVal for Callable {
    #[inline(always)]
    fn to_val(&self) -> GResult<Val> {
        match *self {
            Callable::GFn(ref root) => Ok(Val::GFn(root.clone())),
            Callable::RFn(rfn) => Ok(Val::RFn(rfn)),
            Callable::Class(ref root) => Ok(Val::Class(root.clone()))
        }
    }

    #[inline(always)]
    fn to_slot(&self) -> GResult<Slot> {
        match *self {
            Callable::GFn(ref root) => Ok(Slot::GFn(Gc::from_root(root))),
            Callable::RFn(rfn) => Ok(Slot::RFn(rfn)),
            Callable::Class(ref root) => Ok(Slot::Class(Gc::from_root(root)))
        }
    }
}

impl ToVal for Expander {
    #[inline(always)]
    fn to_val(&self) -> GResult<Val> {
        match *self {
            Expander::GFn(ref root) => Ok(Val::GFn(root.clone())),
            Expander::RFn(rfn) => Ok(Val::RFn(rfn))
        }
    }

    #[inline(always)]
    fn to_slot(&self) -> GResult<Slot> {
        match *self {
            Expander::GFn(ref root) => Ok(Slot::GFn(Gc::from_root(root))),
            Expander::RFn(rfn) => Ok(Slot::RFn(rfn))
        }
    }
}

impl ToVal for Iterable {
    #[inline(always)]
    fn to_val(&self) -> GResult<Val> {
        match self {
            Iterable::Arr(root) => Ok(Val::Arr(root.clone())),
            Iterable::Str(root) => Ok(Val::Str(root.clone())),
            Iterable::Tab(root) => Ok(Val::Tab(root.clone())),
            Iterable::GIter(root) => Ok(Val::GIter(root.clone())),
            Iterable::Coro(root) => Ok(Val::Coro(root.clone()))
        }
    }

    #[inline(always)]
    fn to_slot(&self) -> GResult<Slot> {
        match self {
            Iterable::Arr(root) => Ok(Slot::Arr(Gc::from_root(root))),
            Iterable::Str(root) => Ok(Slot::Str(Gc::from_root(root))),
            Iterable::Tab(root) => Ok(Slot::Tab(Gc::from_root(root))),
            Iterable::GIter(root) => Ok(Slot::GIter(Gc::from_root(root))),
            Iterable::Coro(root) => Ok(Slot::Coro(Gc::from_root(root)))
        }
    }
}

impl ToVal for GIterLen {
    #[inline(always)]
    fn to_val(&self) -> GResult<Val> {
        match *self {
            GIterLen::Exact(len) => Ok(Val::Int(len as i32)),
            GIterLen::Infinite => Ok(Val::Sym(INFINITE_SYM)),
            GIterLen::Unknown => Ok(Val::Sym(UNKNOWN_SYM))
        }
    }
}

impl ToVal for Ordering {
    #[inline(always)]
    fn to_val(&self) -> GResult<Val> {
        match *self {
            Ordering::Less => Ok(Val::Sym(LT_SYM)),
            Ordering::Equal => Ok(Val::Sym(NUM_EQ_SYM)),
            Ordering::Greater => Ok(Val::Sym(GT_SYM))
        }
    }

    #[inline(always)]
    fn to_slot(&self) -> GResult<Slot> {
        match *self {
            Ordering::Less => Ok(Slot::Sym(LT_SYM)),
            Ordering::Equal => Ok(Slot::Sym(NUM_EQ_SYM)),
            Ordering::Greater => Ok(Slot::Sym(GT_SYM))
        }
    }
}
macro_rules! impl_to_val_bounded_int {
    ($self_type:ty) => (
        impl ToVal for $self_type {
            #[inline(always)]
            fn to_val(&self) -> GResult<Val> {
                if let Ok(converted) = (*self).try_into() {
                    Ok(Val::Int(converted))
                } else {
                    bail!("the result was {}, which is outside the range of an i32", self)
                }
            }

            #[inline(always)]
            fn to_slot(&self) -> GResult<Slot> {
                if let Ok(converted) = (*self).try_into() {
                    Ok(Slot::Int(converted))
                } else {
                    bail!("the result was {}, which is outside the range of an i32", self)
                }
            }
        }
    );
}

impl_to_val_bounded_int!(i64);
impl_to_val_bounded_int!(i128);
impl_to_val_bounded_int!(isize);
impl_to_val_bounded_int!(u32);
impl_to_val_bounded_int!(u64);
impl_to_val_bounded_int!(u128);
impl_to_val_bounded_int!(usize);

impl ToVal for f64 {
    #[inline(always)]
    fn to_val(&self) -> GResult<Val> { Ok(Val::Flo(*self as f32)) }

    #[inline(always)]
    fn to_slot(&self) -> GResult<Slot> { Ok(Slot::Flo(*self as f32)) }
}

impl ToVal for Num {
    #[inline(always)]
    fn to_val(&self) -> GResult<Val> {
        match *self {
            Num::Int(i) => Ok(Val::Int(i)),
            Num::Flo(f) => Ok(Val::Flo(f))
        }
    }

    #[inline(always)]
    fn to_slot(&self) -> GResult<Slot> {
        match *self {
            Num::Int(i) => Ok(Slot::Int(i)),
            Num::Flo(f) => Ok(Slot::Flo(f))
        }
    }
}

impl<T> ToVal for Vec<T> where for<'a> &'a T: ToVal {
    fn to_val(&self) -> GResult<Val> {
        let arr = glsp::arr_with_capacity(self.len());
        for t in self.iter() {
            arr.push(t)?
        }
        Ok(Val::Arr(arr))
    }
}

impl<T> ToVal for VecDeque<T> where for<'a> &'a T: ToVal {
    fn to_val(&self) -> GResult<Val> {
        let arr = glsp::arr_with_capacity(self.len());
        for t in self.iter() {
            arr.push(t)?
        }
        Ok(Val::Arr(arr))
    }
}

impl<A: smallvec::Array> ToVal for SmallVec<A> where for<'a> &'a A::Item: ToVal {
    fn to_val(&self) -> GResult<Val> {
        let arr = glsp::arr_with_capacity(self.len());
        for t in self.iter() {
            arr.push(t)?
        }
        Ok(Val::Arr(arr))
    }
}

impl<'a, T: ToVal> ToVal for &'a [T] {
    fn to_val(&self) -> GResult<Val> {
        let arr = glsp::arr_with_capacity(self.len());
        for t in self.iter() {
            arr.push(t)?
        }
        Ok(Val::Arr(arr))
    }
}

macro_rules! impl_to_val_array {
    ($($len:literal),+) => (
        $(
            impl<T> ToVal for [T; $len] where for<'a> &'a T: ToVal {
                fn to_val(&self) -> GResult<Val> {
                    let arr = glsp::arr_with_capacity($len);
                    for t in self.iter() {
                        arr.push(t)?
                    }
                    Ok(Val::Arr(arr))
                }
            }
        )+
    );
}

impl_to_val_array!(
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
    17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32
);
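//a sketch of the conversions above: (5_000_000_000_i64).to_val() fails, because the value
//is outside the range of an i32; (0.5_f64).to_val() silently narrows to an f32; and
//[1_u8, 2, 3].to_val() allocates a new arr, (1 2 3).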
macro_rules! impl_to_val_tuple {
    ($len:literal: $($t:ident $i:tt),+) => (
        impl<$($t),+> ToVal for ($($t,)+)
        where
            $( for<'a> &'a $t: ToVal ),+
        {
            fn to_val(&self) -> GResult<Val> {
                let arr = glsp::arr_with_capacity($len);
                $(
                    arr.push(&(self.$i))?;
                )+
                Ok(Val::Arr(arr))
            }
        }
    );
}

impl_to_val_tuple!( 1: A 0);
impl_to_val_tuple!( 2: A 0, B 1);
impl_to_val_tuple!( 3: A 0, B 1, C 2);
impl_to_val_tuple!( 4: A 0, B 1, C 2, D 3);
impl_to_val_tuple!( 5: A 0, B 1, C 2, D 3, E 4);
impl_to_val_tuple!( 6: A 0, B 1, C 2, D 3, E 4, F 5);
impl_to_val_tuple!( 7: A 0, B 1, C 2, D 3, E 4, F 5, G 6);
impl_to_val_tuple!( 8: A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7);
impl_to_val_tuple!( 9: A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8);
impl_to_val_tuple!(10: A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9);
impl_to_val_tuple!(11: A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10);
impl_to_val_tuple!(12: A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11);

impl ToVal for String {
    fn to_val(&self) -> GResult<Val> {
        Ok(Val::Str(glsp::str_from_rust_str(&self)))
    }
}

impl ToVal for str {
    fn to_val(&self) -> GResult<Val> {
        Ok(Val::Str(glsp::str_from_rust_str(self)))
    }
}

impl ToVal for CString {
    fn to_val(&self) -> GResult<Val> {
        match self.to_str() {
            Ok(str_ref) => str_ref.to_val(),
            Err(_) => bail!("CString contained non-UTF-8 data")
        }
    }
}

impl ToVal for CStr {
    fn to_val(&self) -> GResult<Val> {
        match self.to_str() {
            Ok(str_ref) => str_ref.to_val(),
            Err(_) => bail!("CStr contained non-UTF-8 data")
        }
    }
}

impl ToVal for OsString {
    fn to_val(&self) -> GResult<Val> {
        match self.to_str() {
            Some(str_ref) => str_ref.to_val(),
            None => bail!("OsString contained non-UTF-8 data")
        }
    }
}

impl ToVal for OsStr {
    fn to_val(&self) -> GResult<Val> {
        match self.to_str() {
            Some(str_ref) => str_ref.to_val(),
            None => bail!("OsStr contained non-UTF-8 data")
        }
    }
}

impl ToVal for PathBuf {
    fn to_val(&self) -> GResult<Val> {
        self.as_os_str().to_val()
    }
}

impl ToVal for Path {
    fn to_val(&self) -> GResult<Val> {
        self.as_os_str().to_val()
    }
}

impl<K: ToVal, V: ToVal, S> ToVal for HashMap<K, V, S> {
    fn to_val(&self) -> GResult<Val> {
        let tab = glsp::tab_with_capacity(self.len());
        for (key, value) in self.iter() {
            let key_slot = key.to_slot()?;
            ensure!(!tab.has(&key_slot)?, "duplicate key in HashMap");
            tab.set(&key_slot, value)?;
        }
        Ok(Val::Tab(tab))
    }
}

impl<K: ToVal, V: ToVal> ToVal for BTreeMap<K, V> {
    fn to_val(&self) -> GResult<Val> {
        let tab = glsp::tab_with_capacity(self.len());
        for (key, value) in self.iter() {
            let key_slot = key.to_slot()?;
            ensure!(!tab.has(&key_slot)?, "duplicate key in BTreeMap");
            tab.set(&key_slot, value)?;
        }
        Ok(Val::Tab(tab))
    }
}

//-------------------------------------------------------------------------------------------------
// IntoResult
//-------------------------------------------------------------------------------------------------

/**
A type which can be returned from an `RFn`.

A blanket implementation is provided for any type which implements [`ToVal`](trait.ToVal.html),
and also for any [`GResult<T>`](type.GResult.html) where `T: ToVal`.

It's not possible to implement this trait for your own types. Implement
[`ToVal`](trait.ToVal.html) instead, or define your type using [`rdata!`](macro.rdata.html)
or [`lib!`](macro.lib.html).
*/
pub trait IntoResult {
    #[doc(hidden)]
    fn into_result(self) -> GResult<Slot>;
}
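/*
an illustrative sketch: thanks to the two impls below, both of these signatures are
valid rfn return types -

    fn double(n: i32) -> i32 { n * 2 }

    fn checked_div(a: i32, b: i32) -> GResult<i32> {
        ensure!(b != 0, "attempted to divide {} by zero", a);
        Ok(a / b)
    }
*/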
//once specialization is enabled, we'll need to provide specialized impls for tuples,
//arrays, etc. we can't currently return collections of things which are passed by value,
//like RData.
impl<T> IntoResult for T where T: ToVal {
    fn into_result(self) -> GResult<Slot> {
        self.to_slot()
    }
}

//once specialization is enabled, we should add a generic impl for Result<T, E: Error>
impl<T> IntoResult for GResult<T> where T: IntoResult {
    fn into_result(self) -> GResult<Slot> {
        match self {
            Ok(payload) => payload.into_result(),
            Err(err) => Err(err)
        }
    }
}

//-------------------------------------------------------------------------------------------------
// FromVal implementations
//-------------------------------------------------------------------------------------------------

// Val, Slot
//-----------------------------------------------------------------------------

impl FromVal for Val {
    #[inline(always)]
    fn from_val(val: &Val) -> GResult<Self> { Ok(val.clone()) }

    #[inline(always)]
    fn from_slot(val: &Slot) -> GResult<Self> { Ok(val.root()) }
}

impl FromVal for Slot {
    #[inline(always)]
    fn from_val(val: &Val) -> GResult<Self> { Ok(Slot::from_val(val)) }

    #[inline(always)]
    fn from_slot(val: &Slot) -> GResult<Self> { Ok(val.clone()) }
}

// integers and other trivial Val fields
//-----------------------------------------------------------------------------

macro_rules! impl_from_val_infallible(
    ($(($t:ty, $variant:ident)),+) => (
        $(
            impl FromVal for $t {
                #[inline(always)]
                fn from_val(val: &Val) -> GResult<Self> {
                    match *val {
                        Val::$variant(interior) => Ok(interior as $t),
                        ref val => bail!("expected {}, received {}",
                                         stringify!($t), val.a_type_name())
                    }
                }

                #[inline(always)]
                fn from_slot(val: &Slot) -> GResult<Self> {
                    match *val {
                        Slot::$variant(interior) => Ok(interior as $t),
                        ref val => bail!("expected {}, received {}",
                                         stringify!($t), val.a_type_name())
                    }
                }
            }
        )+
    );
);

impl_from_val_infallible!(
    (i32, Int),
    (i64, Int),
    (i128, Int),
    (isize, Int),
    (char, Char),
    (bool, Bool),
    (Sym, Sym),
    (RFn, RFn)
);

macro_rules! impl_from_val_root(
    ($(($t:ty, $variant:ident)),+) => (
        $(
            impl FromVal for Root<$t> {
                #[inline(always)]
                fn from_val(val: &Val) -> GResult<Self> {
                    match *val {
                        Val::$variant(ref root) => Ok(root.clone()),
                        ref val => bail!("expected {}, received {}",
                                         stringify!(Root<$t>), val.a_type_name())
                    }
                }

                #[inline(always)]
                fn from_slot(val: &Slot) -> GResult<Self> {
                    match *val {
                        Slot::$variant(ref gc) => Ok(gc.root()),
                        ref val => bail!("expected {}, received {}",
                                         stringify!(Root<$t>), val.a_type_name())
                    }
                }
            }

            impl FromVal for Gc<$t> {
                #[inline(always)]
                fn from_val(val: &Val) -> GResult<Self> {
                    match *val {
                        Val::$variant(ref root) => Ok(root.as_gc().clone()),
                        ref val => bail!("expected {}, received {}",
                                         stringify!(Gc<$t>), val.a_type_name())
                    }
                }

                #[inline(always)]
                fn from_slot(val: &Slot) -> GResult<Self> {
                    match *val {
                        Slot::$variant(ref gc) => Ok(gc.clone()),
                        ref val => bail!("expected {}, received {}",
                                         stringify!(Gc<$t>), val.a_type_name())
                    }
                }
            }
        )+
    );
);

impl_from_val_root!(
    (Arr, Arr),
    (Str, Str),
    (Tab, Tab),
    (GIter, GIter),
    (Obj, Obj),
    (GFn, GFn),
    (Class, Class),
    (Coro, Coro),
    (RData, RData)
);

impl<T: RStore> FromVal for RRoot<T> {
    #[inline(always)]
    fn from_val(val: &Val) -> GResult<RRoot<T>> {
        match val {
            Val::RData(root) => Ok(RRoot::new(root.clone())),
            val => bail!("expected RRoot<{}>, received {}",
                         type_name::<T>(), val.a_type_name())
        }
    }

    #[inline(always)]
    fn from_slot(slot: &Slot) -> GResult<RRoot<T>> {
        match slot {
            Slot::RData(gc) => Ok(RRoot::new(gc.root())),
            val => bail!("expected RRoot<{}>, received {}",
                         type_name::<T>(), val.a_type_name())
        }
    }
}
macro_rules! impl_from_val_int_fallible_small(
    ($($t:ident),+) => (
        $(
            impl FromVal for $t {
                #[inline(always)]
                fn from_val(val: &Val) -> GResult<Self> {
                    match *val {
                        Val::Int(i) if i >= $t::MIN as i32 && i <= $t::MAX as i32 => {
                            Ok(i as $t)
                        }
                        Val::Int(i) => {
                            bail!("expected {}, received an int with value {}",
                                  stringify!($t), i)
                        }
                        ref val => bail!("expected {}, received {}",
                                         stringify!($t), val.a_type_name())
                    }
                }

                #[inline(always)]
                fn from_slot(val: &Slot) -> GResult<Self> {
                    match *val {
                        Slot::Int(i) if i >= $t::MIN as i32 && i <= $t::MAX as i32 => {
                            Ok(i as $t)
                        }
                        Slot::Int(i) => {
                            bail!("expected {}, received an int with value {}",
                                  stringify!($t), i)
                        }
                        ref val => bail!("expected {}, received {}",
                                         stringify!($t), val.a_type_name())
                    }
                }
            }
        )+
    );
);

impl_from_val_int_fallible_small!(i8, i16, u8, u16);

macro_rules! impl_from_val_int_fallible_large(
    ($($t:ty),+) => (
        $(
            impl FromVal for $t {
                #[inline(always)]
                fn from_val(val: &Val) -> GResult<Self> {
                    match *val {
                        Val::Int(i) if i >= 0 => {
                            Ok(i as $t)
                        }
                        Val::Int(i) => {
                            bail!("expected {}, received an int with value {}",
                                  stringify!($t), i)
                        }
                        ref val => bail!("expected {}, received {}",
                                         stringify!($t), val.a_type_name())
                    }
                }

                #[inline(always)]
                fn from_slot(val: &Slot) -> GResult<Self> {
                    match *val {
                        Slot::Int(i) if i >= 0 => {
                            Ok(i as $t)
                        }
                        Slot::Int(i) => {
                            bail!("expected {}, received an int with value {}",
                                  stringify!($t), i)
                        }
                        ref val => bail!("expected {}, received {}",
                                         stringify!($t), val.a_type_name())
                    }
                }
            }
        )+
    );
);

impl_from_val_int_fallible_large!(u32, u64, u128, usize);

// f32, f64
//-----------------------------------------------------------------------------

impl FromVal for f32 {
    #[inline(always)]
    fn from_val(val: &Val) -> GResult<Self> {
        match *val {
            Val::Flo(f) => Ok(f),
            ref val => bail!("expected f32, received {}", val.a_type_name())
        }
    }

    #[inline(always)]
    fn from_slot(val: &Slot) -> GResult<Self> {
        match *val {
            Slot::Flo(f) => Ok(f),
            ref val => bail!("expected f32, received {}", val.a_type_name())
        }
    }
}

impl FromVal for f64 {
    #[inline(always)]
    fn from_val(val: &Val) -> GResult<Self> {
        match *val {
            Val::Flo(f) => Ok(f as f64),
            ref val => bail!("expected f64, received {}", val.a_type_name())
        }
    }

    #[inline(always)]
    fn from_slot(val: &Slot) -> GResult<Self> {
        match *val {
            Slot::Flo(f) => Ok(f as f64),
            ref val => bail!("expected f64, received {}", val.a_type_name())
        }
    }
}

// Num
//-----------------------------------------------------------------------------

impl FromVal for Num {
    #[inline(always)]
    fn from_val(val: &Val) -> GResult<Self> {
        match *val {
            Val::Int(i) => Ok(Num::Int(i)),
            Val::Flo(f) => Ok(Num::Flo(f)),
            ref val => bail!("expected Num, received {}", val.a_type_name())
        }
    }

    #[inline(always)]
    fn from_slot(val: &Slot) -> GResult<Self> {
        match *val {
            Slot::Int(i) => Ok(Num::Int(i)),
            Slot::Flo(f) => Ok(Num::Flo(f)),
            ref val => bail!("expected Num, received {}", val.a_type_name())
        }
    }
}

// Deque
//-----------------------------------------------------------------------------

impl FromVal for Deque {
    #[inline(always)]
    fn from_val(val: &Val) -> GResult<Self> {
        match *val {
            Val::Arr(ref root) => Ok(Deque::Arr(root.clone())),
            Val::Str(ref root) => Ok(Deque::Str(root.clone())),
            ref val => bail!("expected Deque, received {}", val.a_type_name())
        }
    }

    #[inline(always)]
    fn from_slot(val: &Slot) -> GResult<Self> {
        match *val {
            Slot::Arr(ref gc) => Ok(Deque::Arr(gc.root())),
            Slot::Str(ref gc) => Ok(Deque::Str(gc.root())),
            ref val => bail!("expected Deque, received {}", val.a_type_name())
        }
    }
}
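//a sketch of the fallible narrowing above: u8::from_val(&Val::Int(300)) fails with
//"expected u8, received an int with value 300", while u8::from_val(&Val::Int(200))
//succeeds. note that flo-to-int coercion is never performed.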
// Callable, Expander, Iterable, EnvMode
//-----------------------------------------------------------------------------

impl FromVal for Callable {
    #[inline(always)]
    fn from_val(val: &Val) -> GResult<Self> {
        match *val {
            Val::GFn(ref root) => Ok(Callable::GFn(root.clone())),
            Val::RFn(rfn) => Ok(Callable::RFn(rfn)),
            Val::Class(ref root) => Ok(Callable::Class(root.clone())),
            ref val => bail!("expected Callable, received {}", val.a_type_name())
        }
    }

    #[inline(always)]
    fn from_slot(val: &Slot) -> GResult<Self> {
        match *val {
            Slot::GFn(ref gc) => Ok(Callable::GFn(gc.root())),
            Slot::RFn(rfn) => Ok(Callable::RFn(rfn)),
            Slot::Class(ref gc) => Ok(Callable::Class(gc.root())),
            ref val => bail!("expected Callable, received {}", val.a_type_name())
        }
    }
}

impl FromVal for Expander {
    #[inline(always)]
    fn from_val(val: &Val) -> GResult<Self> {
        match *val {
            Val::GFn(ref root) => Ok(Expander::GFn(root.clone())),
            Val::RFn(rfn) => Ok(Expander::RFn(rfn)),
            ref val => bail!("expected Expander, received {}", val.a_type_name())
        }
    }

    #[inline(always)]
    fn from_slot(val: &Slot) -> GResult<Self> {
        match *val {
            Slot::GFn(ref gc) => Ok(Expander::GFn(gc.root())),
            Slot::RFn(rfn) => Ok(Expander::RFn(rfn)),
            ref val => bail!("expected Expander, received {}", val.a_type_name())
        }
    }
}

impl FromVal for Iterable {
    #[inline(always)]
    fn from_val(val: &Val) -> GResult<Self> {
        match val {
            Val::Arr(root) => Ok(Iterable::Arr(root.clone())),
            Val::Str(root) => Ok(Iterable::Str(root.clone())),
            Val::Tab(root) => Ok(Iterable::Tab(root.clone())),
            Val::GIter(root) => Ok(Iterable::GIter(root.clone())),
            Val::Coro(root) => Ok(Iterable::Coro(root.clone())),
            val => bail!("expected Iterable, received {}", val.a_type_name())
        }
    }

    #[inline(always)]
    fn from_slot(slot: &Slot) -> GResult<Self> {
        match slot {
            Slot::Arr(gc) => Ok(Iterable::Arr(gc.root())),
            Slot::Str(gc) => Ok(Iterable::Str(gc.root())),
            Slot::Tab(gc) => Ok(Iterable::Tab(gc.root())),
            Slot::GIter(gc) => Ok(Iterable::GIter(gc.root())),
            Slot::Coro(gc) => Ok(Iterable::Coro(gc.root())),
            slot => bail!("expected Iterable, received {}", slot.a_type_name())
        }
    }
}

impl FromVal for EnvMode {
    #[inline(always)]
    fn from_val(val: &Val) -> GResult<Self> {
        match *val {
            Val::Sym(sym) => {
                match sym {
                    FRESH_SYM => Ok(EnvMode::Fresh),
                    COPIED_SYM => Ok(EnvMode::Copied),
                    _ => bail!("expected an EnvMode, received the symbol {}", sym)
                }
            }
            ref val => bail!("expected an EnvMode, received {}", val.a_type_name())
        }
    }
}

// Ordering
//-----------------------------------------------------------------------------

impl FromVal for Ordering {
    #[inline(always)]
    fn from_val(val: &Val) -> GResult<Self> {
        match *val {
            Val::Sym(LT_SYM) => Ok(Ordering::Less),
            Val::Sym(NUM_EQ_SYM) => Ok(Ordering::Equal),
            Val::Sym(GT_SYM) => Ok(Ordering::Greater),
            ref val => bail!("expected Ordering, received {}", val.a_type_name())
        }
    }

    #[inline(always)]
    fn from_slot(val: &Slot) -> GResult<Self> {
        match *val {
            Slot::Sym(LT_SYM) => Ok(Ordering::Less),
            Slot::Sym(NUM_EQ_SYM) => Ok(Ordering::Equal),
            Slot::Sym(GT_SYM) => Ok(Ordering::Greater),
            ref val => bail!("expected Ordering, received {}", val.a_type_name())
        }
    }
}

// Vec<T>
//-----------------------------------------------------------------------------

impl<T: FromVal> FromVal for Vec<T> {
    fn from_val(val: &Val) -> GResult<Self> {
        match *val {
            Val::Arr(ref arr) => {
                let mut vec = Vec::<T>::with_capacity(arr.len());
                let arr_borrow = arr.borrow();
                for slot in arr_borrow.iter() {
                    vec.push(T::from_slot(slot)?);
                }
                Ok(vec)
            }
            ref val => bail!("expected a Vec, received {}", val.a_type_name())
        }
    }
}
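//a sketch of the impls above: EnvMode only accepts the two symbols named by FRESH_SYM and
//COPIED_SYM, while Vec<T> accepts any arr whose elements all convert to T, e.g.
//  let ns = Vec::<i32>::from_val(&val)?;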
// VecDeque<T>
//-----------------------------------------------------------------------------

impl<T: FromVal> FromVal for VecDeque<T> {
    fn from_val(val: &Val) -> GResult<Self> {
        match *val {
            Val::Arr(ref arr) => {
                let mut vec = VecDeque::<T>::with_capacity(arr.len());
                let arr_borrow = arr.borrow();
                for slot in arr_borrow.iter() {
                    vec.push_back(T::from_slot(slot)?);
                }
                Ok(vec)
            }
            ref val => bail!("expected a VecDeque, received {}", val.a_type_name())
        }
    }
}

// SmallVec<A>
//-----------------------------------------------------------------------------

impl<A> FromVal for SmallVec<A> where A: smallvec::Array, A::Item: FromVal {
    fn from_val(val: &Val) -> GResult<Self> {
        match *val {
            Val::Arr(ref arr) => {
                let mut small_vec = SmallVec::<A>::with_capacity(arr.len());
                let arr_borrow = arr.borrow();
                for slot in arr_borrow.iter() {
                    small_vec.push(A::Item::from_slot(slot)?);
                }
                Ok(small_vec)
            }
            ref val => bail!("expected a SmallVec, received {}", val.a_type_name())
        }
    }
}

// [T; n] where T: FromVal
//-----------------------------------------------------------------------------

macro_rules! impl_from_val_array {
    ($($len:literal [$($n:literal),*]),+) => (
        $(
            impl<T> FromVal for [T; $len] where T: FromVal {
                fn from_val(val: &Val) -> GResult<[T; $len]> {
                    match *val {
                        Val::Arr(ref arr) => {
                            ensure!(arr.len() == $len,
                                    "expected a [T; {}], received an arr of length {}",
                                    $len, arr.len());
                            Ok([$(
                                arr.get::<T>($n)?,
                            )*])
                        }
                        ref val => {
                            bail!("expected a [T; {}], received {}", $len, val.a_type_name())
                        }
                    }
                }
            }
        )+
    );
}

impl_from_val_array!(
    0 [],
    1 [0],
    2 [0, 1],
    3 [0, 1, 2],
    4 [0, 1, 2, 3],
    5 [0, 1, 2, 3, 4],
    6 [0, 1, 2, 3, 4, 5],
    7 [0, 1, 2, 3, 4, 5, 6],
    8 [0, 1, 2, 3, 4, 5, 6, 7],
    9 [0, 1, 2, 3, 4, 5, 6, 7, 8],
    10 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
    11 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
    12 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
    13 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
    14 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13],
    15 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14],
    16 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
);

// (T0, T1, ...) where T0: FromVal, T1: FromVal...
//-----------------------------------------------------------------------------
macro_rules! impl_from_val_tuple {
    ($len:literal: $($t:ident $i:tt),+) => (
        impl<$($t),+> FromVal for ($($t,)+)
        where
            $($t: FromVal),+
        {
            fn from_val(val: &Val) -> GResult<($($t,)+)> {
                match *val {
                    Val::Arr(ref arr) => {
                        ensure!(arr.len() == $len,
                                "expected a {}-element tuple, received an arr of length {}",
                                $len, arr.len());
                        Ok(($(
                            arr.get::<$t>($i)?,
                        )*))
                    }
                    ref val => bail!("expected a tuple, received {}", val.a_type_name())
                }
            }
        }
    );
}

impl_from_val_tuple!( 1: A 0);
impl_from_val_tuple!( 2: A 0, B 1);
impl_from_val_tuple!( 3: A 0, B 1, C 2);
impl_from_val_tuple!( 4: A 0, B 1, C 2, D 3);
impl_from_val_tuple!( 5: A 0, B 1, C 2, D 3, E 4);
impl_from_val_tuple!( 6: A 0, B 1, C 2, D 3, E 4, F 5);
impl_from_val_tuple!( 7: A 0, B 1, C 2, D 3, E 4, F 5, G 6);
impl_from_val_tuple!( 8: A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7);
impl_from_val_tuple!( 9: A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8);
impl_from_val_tuple!(10: A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9);
impl_from_val_tuple!(11: A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10);
impl_from_val_tuple!(12: A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11);

// String
//-----------------------------------------------------------------------------

impl FromVal for String {
    fn from_val(val: &Val) -> GResult<Self> {
        match *val {
            Val::Str(ref st) => Ok(st.to_string()),
            ref val => bail!("expected a str, received {}", val.a_type_name())
        }
    }
}

// CString
//-----------------------------------------------------------------------------

impl FromVal for CString {
    fn from_val(val: &Val) -> GResult<Self> {
        match *val {
            Val::Str(ref st) => {
                match CString::new(st.to_string()) {
                    Ok(cstring) => Ok(cstring),
                    Err(_) => {
                        bail!("expected a C string, received a str with an inner nul")
                    }
                }
            }
            ref val => bail!("expected a C string, received {}", val.a_type_name())
        }
    }
}

// PathBuf
//-----------------------------------------------------------------------------

impl FromVal for PathBuf {
    fn from_val(val: &Val) -> GResult<Self> {
        match *val {
            Val::Str(ref st) => Ok(PathBuf::from(st.to_string())),
            ref val => bail!("expected a path, received {}", val.a_type_name())
        }
    }
}

// OsString
//-----------------------------------------------------------------------------

impl FromVal for OsString {
    fn from_val(val: &Val) -> GResult<Self> {
        match *val {
            Val::Str(ref st) => Ok(OsString::from(st.to_string())),
            ref val => bail!("expected an OS string, received {}", val.a_type_name())
        }
    }
}

// HashMap<K, V>
//-----------------------------------------------------------------------------

impl<K, V, S> FromVal for HashMap<K, V, S>
where
    K: Hash + Eq + FromVal,
    V: FromVal,
    S: BuildHasher + Default
{
    fn from_val(val: &Val) -> GResult<Self> {
        match *val {
            Val::Tab(ref tab) => {
                let s = S::default();
                let mut hash_map = HashMap::<K, V, S>::with_capacity_and_hasher(tab.len(), s);
                let tab_borrow = tab.borrow();
                for (internal_key, internal_value) in tab_borrow.iter() {
                    let key = K::from_slot(internal_key)?;
                    let value = V::from_slot(internal_value)?;
                    if hash_map.insert(key, value).is_some() {
                        bail!("duplicate key in HashMap argument");
                    }
                }
                Ok(hash_map)
            }
            ref val => bail!("expected a HashMap, received {}", val.a_type_name())
        }
    }
}

// BTreeMap<K, V>
//-----------------------------------------------------------------------------
impl<K: Ord + FromVal, V: FromVal> FromVal for BTreeMap<K, V> {
    fn from_val(val: &Val) -> GResult<Self> {
        match *val {
            Val::Tab(ref tab) => {
                let mut btree_map = BTreeMap::<K, V>::new();
                let tab_borrow = tab.borrow();
                for (internal_key, internal_value) in tab_borrow.iter() {
                    let key = K::from_slot(internal_key)?;
                    let value = V::from_slot(internal_value)?;
                    if btree_map.insert(key, value).is_some() {
                        bail!("duplicate key in BTreeMap argument");
                    }
                }
                Ok(btree_map)
            }
            ref val => bail!("expected a BTreeMap, received {}", val.a_type_name())
        }
    }
}

//-------------------------------------------------------------------------------------------------
// rfn!() and WrappedFn
//-------------------------------------------------------------------------------------------------

/**
Wrap a function pointer or closure so that it can be passed to [`glsp::rfn`](fn.rfn.html)
and similar functions.

The macro receives a single argument, which must be the path to a function, the path to a
method, or an expression which evaluates to a *non-capturing* closure.

The return value is a [`WrappedFn`](struct.WrappedFn.html). This is an opaque type: it
doesn't support any operations except being converted to an `RFn`.

In effect, this macro takes an arbitrary Rust function and converts it into a type-erased
function pointer which can be called by GameLisp. The function's return value must implement
[`IntoResult`](trait.IntoResult.html), and all of its arguments must implement
[`MakeArg`](trait.MakeArg.html).
*/
#[macro_export]
macro_rules! rfn {
    ($fn_expr:expr) => (
        {
            $crate::WrappedFn::new(
                |vals: std::cell::Ref<[$crate::Slot]>| -> $crate::GResult<$crate::Slot> {
                    let mut temps = $crate::make_temps($fn_expr, &*vals)?;
                    drop(vals);
                    $crate::forwarder($fn_expr, &mut temps)
                },
                $crate::wrapped_arg_limits($fn_expr)
            )
        }
    );
}

/**
Data required to construct an `RFn`.

This opaque struct is produced by the [`rfn!` macro](macro.rfn.html), and consumed by
[`glsp::rfn`](fn.rfn.html), [`glsp::bind_rfn`](fn.bind_rfn.html), and similar functions.
*/
#[derive(Copy, Clone)]
pub struct WrappedFn {
    wrapper: fn(Ref<[Slot]>) -> GResult<Slot>,
    pub(crate) arg_limits: (usize, Option<usize>)
}

impl WrappedFn {
    #[doc(hidden)]
    pub fn new(
        wrapper: fn(Ref<[Slot]>) -> GResult<Slot>,
        arg_limits: (usize, Option<usize>)
    ) -> WrappedFn {
        WrappedFn { wrapper, arg_limits }
    }

    #[inline(always)]
    pub(crate) fn call(&self, vals: Ref<[Slot]>) -> GResult<Slot> {
        if vals.len() < self.arg_limits.0 {
            bail!("too few arguments: received {}, expected at least {}",
                  vals.len(), self.arg_limits.0)
        }

        if let Some(max_args) = self.arg_limits.1 {
            if vals.len() > max_args {
                bail!("too many arguments: received {}, expected no more than {}",
                      vals.len(), max_args)
            }
        }

        (self.wrapper)(vals)
    }

    pub(crate) fn as_usize(&self) -> usize {
        self.wrapper as usize
    }
}

//-------------------------------------------------------------------------------------------------
// multiplexing tuple traits
//-------------------------------------------------------------------------------------------------

pub trait TupleCall<Args> {
    type Output;

    fn tuple_call(&self, args: Args) -> Self::Output;
}

pub trait MakeTemps {
    type Temps: 'static;

    fn make_temps(slots: &[Slot]) -> GResult<Self::Temps>;
    fn arg_limits() -> (usize, Option<usize>);
}

pub trait MakeArgs<'a>: Sized + MakeTemps {
    fn make_args(temps: &'a mut Self::Temps) -> GResult<Self>;
}

#[doc(hidden)]
#[inline(always)]
pub fn make_temps<Args, F>(_f: F, vals: &[Slot]) -> GResult<Args::Temps>
where
    Args: MakeTemps,
    F: TupleCall<Args>
{
    Args::make_temps(vals)
}

#[doc(hidden)]
#[inline(always)]
pub fn forwarder<'a, Args, F>(f: F, temps: &'a mut Args::Temps) -> GResult<Slot>
where
    Args: MakeArgs<'a>,
    F: TupleCall<Args>,
    F::Output: IntoResult
{
    f.tuple_call(Args::make_args(temps)?).into_result()
}
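//an illustrative sketch: for fn add(a: i32, b: i32) -> i32, wrapped_arg_limits returns
//(2, Some(2)), so WrappedFn::call rejects (add 1) with "too few arguments: received 1,
//expected at least 2" before the wrapper closure ever runs.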
#[doc(hidden)]
pub fn wrapped_arg_limits<Args, F>(_f: F) -> (usize, Option<usize>)
where
    Args: MakeTemps,
    F: TupleCall<Args>
{
    Args::arg_limits()
}

macro_rules! tuple_impls(
    ($arg_count:literal; $($arg_type:ident),*; $($i:tt),*) => (
        impl<F, R $(,$arg_type)*> TupleCall<($($arg_type,)*)> for F
        where
            F: Fn($($arg_type),*) -> R
        {
            type Output = R;

            #[allow(unused_assignments, unused_mut, unused_variables)]
            #[inline(always)]
            fn tuple_call(&self, args: ($($arg_type,)*)) -> R {
                self($(args.$i),*)
            }
        }

        impl<'a $(,$arg_type)*> MakeTemps for ($($arg_type,)*)
        where
            $( $arg_type: MakeTemp ),*
        {
            type Temps = ($($arg_type::Temp,)*);

            #[allow(unused_assignments, unused_mut, unused_variables)]
            #[inline(always)]
            fn arg_limits() -> (usize, Option<usize>) {
                let mut normal_args = 0;
                let mut opt_args = 0;

                let mut seen_rest = false;
                let mut seen_opt = false;
                let mut seen_glsp_or_lib = false;

                $(
                    assert!(!seen_rest, "&[T] argument is somewhere other than final position");

                    match $arg_type::ARG_TYPE {
                        ArgType::Lib => {
                            assert!(normal_args == 0 && opt_args == 0 && !seen_rest,
                                    "&Lib argument appears after a normal argument");
                        }
                        ArgType::Normal => {
                            assert!(!seen_opt, "Option<T> followed by a non-optional argument");
                            normal_args += 1;
                        }
                        ArgType::Opt => {
                            seen_opt = true;
                            opt_args += 1;
                        }
                        ArgType::Rest => {
                            seen_rest = true;
                        }
                    }
                )*

                (normal_args, if seen_rest { None } else { Some(normal_args + opt_args) })
            }

            #[allow(unused_assignments, unused_mut, unused_variables)]
            #[inline(always)]
            fn make_temps(vals: &[Slot]) -> GResult<($($arg_type::Temp,)*)> {
                let mut i = 0;
                Ok(($(
                    {
                        let temp = $arg_type::make_temp(vals, i)?;
                        if $arg_type::ARG_TYPE != ArgType::Lib {
                            i += 1;
                        }
                        temp
                    }
                ,)*))
            }
        }

        impl<'a $(,$arg_type)*> MakeArgs<'a> for ($($arg_type,)*)
        where
            $( $arg_type: MakeArg<'a> ),*
        {
            #[allow(unused_assignments, unused_mut, unused_variables)]
            #[inline(always)]
            fn make_args(temps: &'a mut Self::Temps) -> GResult<Self> {
                let mut i = 0;
                Ok(($(
                    {
                        let temp = $arg_type::make_arg(&mut temps.$i)?;
                        if $arg_type::ARG_TYPE != ArgType::Lib {
                            i += 1;
                        }
                        temp
                    }
                ,)*))
            }
        }
    );
);

//todo: ideally we would process Lib parameters last, so that arguments conversions using
//FromVal can access Libs. the ideal evaluation order would be make_temps, make_args,
//make_temps_libs, then make_args_libs, but there seems to be no straightforward way to
//achieve that, at least without risking poor performance or increasing compile times...
tuple_impls!(0; ; );
tuple_impls!(1; T0; 0);
tuple_impls!(2; T0, T1; 0, 1);
tuple_impls!(3; T0, T1, T2; 0, 1, 2);
tuple_impls!(4; T0, T1, T2, T3; 0, 1, 2, 3);
tuple_impls!(5; T0, T1, T2, T3, T4; 0, 1, 2, 3, 4);
tuple_impls!(6; T0, T1, T2, T3, T4, T5; 0, 1, 2, 3, 4, 5);
tuple_impls!(7; T0, T1, T2, T3, T4, T5, T6; 0, 1, 2, 3, 4, 5, 6);
tuple_impls!(8; T0, T1, T2, T3, T4, T5, T6, T7; 0, 1, 2, 3, 4, 5, 6, 7);
tuple_impls!(9; T0, T1, T2, T3, T4, T5, T6, T7, T8; 0, 1, 2, 3, 4, 5, 6, 7, 8);
tuple_impls!(10; T0, T1, T2, T3, T4, T5, T6, T7, T8, T9; 0, 1, 2, 3, 4, 5, 6, 7, 8, 9);
tuple_impls!(11; T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10;
             0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
tuple_impls!(12; T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11;
             0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11);

//-------------------------------------------------------------------------------------------------
// MakeTemp and MakeArg, and their implementations
//-------------------------------------------------------------------------------------------------

#[doc(hidden)]
#[derive(PartialEq)]
pub enum ArgType {
    Lib,
    Normal,
    Opt,
    Rest
}

#[doc(hidden)]
pub trait MakeTemp {
    const ARG_TYPE: ArgType = ArgType::Normal;
    type Temp: 'static;

    fn make_temp(vals: &[Slot], i: usize) -> GResult<Self::Temp>;
}

/**
A type which can act as an `RFn` parameter.

A blanket implementation is provided for any type which implements
[`FromVal`](trait.FromVal.html), and also for several other types:

- `Option<T>`, which acts as an optional parameter, storing `None` when an argument
  isn't provided.
- `&[T]` or `&mut [T]`, which act as a "rest" parameter, capturing any number of arguments.
- [`OrNil<T>`](struct.OrNil.html), which accepts either the specified type or `#n`.
- Shared references to primitive types: [`&Arr`](struct.Arr.html),
  [`&Tab`](struct.Tab.html), etc.
- String slices: `&str`, `&Path`, `&CStr`, `&OsStr`.
- `&T` and `&mut T`, where `T` was defined using the [`lib!`](macro.lib.html) or
  [`rdata!`](macro.rdata.html) macros.

It's not possible to implement this trait for your own types. Implement
[`FromVal`](trait.FromVal.html) instead, or define your type using
[`rdata!`](macro.rdata.html) or [`lib!`](macro.lib.html).
*/
pub trait MakeArg<'a>: Sized + MakeTemp {
    fn make_arg(temp: &'a mut Self::Temp) -> GResult<Self>;
}

impl<T> MakeTemp for T where T: FromVal {
    type Temp = Slot;

    #[inline(always)]
    fn make_temp(vals: &[Slot], i: usize) -> GResult<Slot> {
        Ok(vals[i].clone())
    }
}
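/*
an illustrative sketch: the assertions in arg_limits() above enforce this parameter order
for rfns -

    fn f(lib: &SomeLib, a: i32, b: Option<i32>, rest: &[i32]) { ... }

any &Lib references first, then required parameters, then Option<T> parameters, then at
most one trailing &[T] rest parameter. (SomeLib here is a hypothetical lib! struct.)
*/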
//note that the specialization RFC implies that &'a T is supposed to be more-specific than T,
//even though this isn't the case with today's feature(specialization). once this is the
//case, we will be able to specialize this impl for &T where T: FromVal etc.
impl<'a, T> MakeArg<'a> for T
where
    T: Sized + FromVal + MakeTemp<Temp = Slot>
{
    #[inline(always)]
    fn make_arg(temp: &'a mut Slot) -> GResult<T> {
        T::from_slot(&temp)
    }
}

// Option<T>
//-----------------------------------------------------------------------------

impl<T> MakeTemp for Option<T> where T: MakeTemp {
    const ARG_TYPE: ArgType = ArgType::Opt;
    type Temp = Option<T::Temp>;

    #[inline(always)]
    fn make_temp(vals: &[Slot], i: usize) -> GResult<Option<T::Temp>> {
        assert!(T::ARG_TYPE == ArgType::Normal, "invalid Option<T> argument in rfn");

        if i < vals.len() {
            Ok(Some(T::make_temp(vals, i)?))
        } else {
            Ok(None)
        }
    }
}

impl<'a, T> MakeArg<'a> for Option<T> where T: MakeArg<'a> {
    #[inline(always)]
    fn make_arg(temp: &'a mut Option<T::Temp>) -> GResult<Option<T>> {
        match *temp {
            Some(ref mut temp) => Ok(Some(T::make_arg(temp)?)),
            None => Ok(None)
        }
    }
}

// OrNil<T>
//-----------------------------------------------------------------------------

/**
A wrapper type for an `RFn` argument which allows it to be `#n`.

For example, this function could be called as either `(example 10)` or `(example #n)`:

    fn example(OrNil(i): OrNil<i32>) {
        if let Some(i) = i {
            println!("{}", i);
        }
    }
*/
pub struct OrNil<T>(pub Option<T>);

impl<T> MakeTemp for OrNil<T> where T: MakeTemp {
    type Temp = Option<T::Temp>;

    #[inline(always)]
    fn make_temp(vals: &[Slot], i: usize) -> GResult<Option<T::Temp>> {
        assert!(T::ARG_TYPE == ArgType::Normal, "invalid OrNil<T> argument in rfn");

        if let Slot::Nil = vals[i] {
            Ok(None)
        } else {
            Ok(Some(T::make_temp(vals, i)?))
        }
    }
}

impl<'a, T> MakeArg<'a> for OrNil<T> where T: MakeArg<'a> {
    #[inline(always)]
    fn make_arg(temp: &'a mut Option<T::Temp>) -> GResult<OrNil<T>> {
        match temp {
            Some(ref mut temp) => Ok(OrNil(Some(T::make_arg(temp)?))),
            None => Ok(OrNil(None))
        }
    }
}

// &[T]
//-----------------------------------------------------------------------------

impl<'r, T> MakeTemp for &'r [T]
where
    T: 'static + for<'a> MakeArg<'a, Temp = Slot>,
    [T; 8]: smallvec::Array<Item = T>
{
    const ARG_TYPE: ArgType = ArgType::Rest;
    type Temp = SmallVec<[T; 8]>;

    #[inline(always)]
    fn make_temp(vals: &[Slot], i: usize) -> GResult<SmallVec<[T; 8]>> {
        GResult::<SmallVec<[T; 8]>>::from_iter((i .. vals.len()).map(|j| {
            let mut slot = vals[j].clone();
            T::make_arg(&mut slot)
        }))
    }
}

impl<'a: 'r, 'r, T> MakeArg<'a> for &'r [T]
where
    T: 'static + for<'a2> MakeArg<'a2, Temp = Slot>,
    [T; 8]: smallvec::Array<Item = T>
{
    #[inline(always)]
    fn make_arg(temp: &'a mut SmallVec<[T; 8]>) -> GResult<&'r [T]> {
        Ok(&temp[..])
    }
}

// &mut [T]
//-----------------------------------------------------------------------------

impl<'r, T> MakeTemp for &'r mut [T]
where
    T: 'static + for<'a> MakeArg<'a, Temp = Slot>,
    [T; 8]: smallvec::Array<Item = T>
{
    const ARG_TYPE: ArgType = ArgType::Rest;
    type Temp = SmallVec<[T; 8]>;

    #[inline(always)]
    fn make_temp(vals: &[Slot], i: usize) -> GResult<SmallVec<[T; 8]>> {
        GResult::<SmallVec<[T; 8]>>::from_iter((i .. vals.len()).map(|j| {
            let mut slot = vals[j].clone();
            T::make_arg(&mut slot)
        }))
    }
}

impl<'a: 'r, 'r, T> MakeArg<'a> for &'r mut [T]
where
    T: 'static + for<'a2> MakeArg<'a2, Temp = Slot>,
    [T; 8]: smallvec::Array<Item = T>
{
    #[inline(always)]
    fn make_arg(temp: &'a mut SmallVec<[T; 8]>) -> GResult<&'r mut [T]> {
        Ok(&mut temp[..])
    }
}
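//a sketch of a rest parameter in use: fn sum(ns: &[i32]) -> i32 { ns.iter().sum() },
//bound via rfn!(sum), accepts any number of arguments: (sum), (sum 1), (sum 1 2 3).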
// &Arr, &Str, etc.
//-----------------------------------------------------------------------------

macro_rules! impl_pointee_make_arg {
    ($(($pointee:ident, $variant:ident)),+) => (
        $(
            impl<'r> MakeTemp for &'r $pointee {
                type Temp = Slot;

                #[inline(always)]
                fn make_temp(vals: &[Slot], i: usize) -> GResult<Slot> {
                    Ok(vals[i].clone())
                }
            }

            impl<'a: 'r, 'r> MakeArg<'a> for &'r $pointee {
                #[inline(always)]
                fn make_arg(temp: &'a mut Slot) -> GResult<&'r $pointee> {
                    match *temp {
                        Slot::$variant(ref gc) => Ok(&**gc),
                        ref val => bail!("expected &{}, received {}",
                                         stringify!($pointee), (val.type_name()))
                    }
                }
            }
        )+
    );
}

impl_pointee_make_arg!(
    (Arr, Arr),
    (Str, Str),
    (Tab, Tab),
    (GIter, GIter),
    (GFn, GFn),
    (Obj, Obj),
    (Class, Class),
    (Coro, Coro),
    (RData, RData)
);

// &str, &Path, &CStr, &OsStr
//-----------------------------------------------------------------------------

impl<'r> MakeTemp for &'r str {
    type Temp = SmallVec<[u8; 128]>;

    #[inline(always)]
    fn make_temp(vals: &[Slot], i: usize) -> GResult<SmallVec<[u8; 128]>> {
        let mut vec = SmallVec::<[u8; 128]>::new();

        match vals[i] {
            Slot::Str(ref st) => write!(&mut vec, "{}", st).unwrap(),
            ref val => bail!("expected a &str, received {}", val.a_type_name())
        }

        Ok(vec)
    }
}

impl<'a: 'r, 'r> MakeArg<'a> for &'r str {
    #[inline(always)]
    fn make_arg(temp: &'a mut SmallVec<[u8; 128]>) -> GResult<&'r str> {
        Ok(str::from_utf8(&temp[..]).unwrap())
    }
}

macro_rules! impl_make_arg_text_slice (
    ($(($slice_type:ident, $owned_type:ident)),+) => (
        $(
            impl<'r> MakeTemp for &'r $slice_type {
                type Temp = $owned_type;

                #[inline(always)]
                fn make_temp(vals: &[Slot], i: usize) -> GResult<$owned_type> {
                    let mut slot = vals[i].clone();
                    $owned_type::make_arg(&mut slot)
                }
            }

            impl<'a: 'r, 'r> MakeArg<'a> for &'r $slice_type {
                #[inline(always)]
                fn make_arg(temp: &'a mut $owned_type) -> GResult<&'r $slice_type> {
                    Ok(&**temp)
                }
            }
        )+
    );
);

impl_make_arg_text_slice!(
    (Path, PathBuf),
    (CStr, CString),
    (OsStr, OsString)
);

//-------------------------------------------------------------------------------------------------
// lib!, rdata!
//-------------------------------------------------------------------------------------------------

/**
Defines a library struct.

The input must be a struct declaration. The macro defines that struct, implements the
[`Lib` trait](trait.Lib.html) for the struct's type, and implements
[`MakeArg`](trait.MakeArg.html) for shared and mutable references to the struct's type.

    lib! {
        struct Graphics {
            canvas: sdl2::render::Canvas<Window>
        }
    }

    impl Graphics {
        fn draw_rect(&self, rect: Rect) {
            self.canvas.draw_rect(rect).unwrap();
        }
    }

    glsp::bind_rfn("draw-rect", rfn!(Graphics::draw_rect))?;

When a reference to a library struct is bound as an `RFn` parameter, that parameter doesn't
consume any input arguments. Instead, it will attempt to [borrow](trait.Lib.html#method.borrow)
the library struct from the active `Runtime`.
*/
#[macro_export]
macro_rules! lib {
    (
        $(#[$struct_attr:meta])*
        $struct_vis:vis struct $lib:ident {
            $($struct_token:tt)*
        }
    ) => (
        $(#[$struct_attr])*
        $struct_vis struct $lib {
            $($struct_token)*
        }

        $crate::lib_impls! { $lib }
    );

    (
        $(#[$struct_attr:meta])*
        $struct_vis:vis struct $lib:ident;
    ) => (
        $(#[$struct_attr])*
        $struct_vis struct $lib;

        $crate::lib_impls! { $lib }
    );

    (
        $(#[$struct_attr:meta])*
        $struct_vis:vis struct $lib:ident ( $($struct_token:tt)* );
    ) => (
        $(#[$struct_attr])*
        $struct_vis struct $lib ( $($struct_token)* );

        $crate::lib_impls! { $lib }
    );
}
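//an illustrative sketch: given the Graphics lib above, an rfn parameter &Graphics is
//filled in by borrowing from the active Runtime, so GameLisp calls (draw-rect r) with
//one argument rather than two.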
#[doc(hidden)]
#[macro_export]
macro_rules! lib_impls {
    ($lib:ident) => (
        impl $crate::Lib for $lib {
            fn type_name() -> &'static str {
                stringify!($lib)
            }
        }

        impl<'r> $crate::MakeTemp for &'r $lib {
            const ARG_TYPE: $crate::ArgType = $crate::ArgType::Lib;
            type Temp = $crate::LibRef<$lib>;

            #[inline(always)]
            fn make_temp(
                _vals: &[$crate::Slot],
                _i: usize
            ) -> $crate::GResult<$crate::LibRef<$lib>> {
                $crate::try_lib::<$lib>()
            }
        }

        impl<'a: 'r, 'r> $crate::MakeArg<'a> for &'r $lib {
            #[inline(always)]
            fn make_arg(temp: &'a mut $crate::LibRef<$lib>) -> $crate::GResult<&'r $lib> {
                Ok(&**temp)
            }
        }

        impl<'r> $crate::MakeTemp for &'r mut $lib {
            const ARG_TYPE: $crate::ArgType = $crate::ArgType::Lib;
            type Temp = $crate::LibRefMut<$lib>;

            #[inline(always)]
            fn make_temp(
                _vals: &[$crate::Slot],
                _i: usize
            ) -> $crate::GResult<$crate::LibRefMut<$lib>> {
                $crate::try_lib_mut::<$lib>()
            }
        }

        impl<'a: 'r, 'r> $crate::MakeArg<'a> for &'r mut $lib {
            #[inline(always)]
            fn make_arg(
                temp: &'a mut $crate::LibRefMut<$lib>
            ) -> $crate::GResult<&'r mut $lib> {
                Ok(&mut **temp)
            }
        }
    );
}

/**
Defines a struct which can be stored on the garbage-collected heap.

The input must be a struct declaration, optionally followed by a `meths { ... }` block. The
macro defines that struct, implements the [`RStore` trait](trait.RStore.html) for the
struct's type, implements [`MakeArg`](trait.MakeArg.html) for shared and mutable references
to the struct's type, and implements [`IntoResult`](trait.IntoResult.html) for the struct's
value type.

    rdata! {
        #[derive(Clone)]
        struct AudioClip {
            samples: Vec<i16>,
            channels: Channels
        }

        meths {
            get "duration": AudioClip::duration,
            "play": AudioClip::play
        }
    }

    impl AudioClip {
        fn load<P: AsRef<Path>>(path: P) -> AudioClip {
            //...
        }

        fn duration(&self) -> usize {
            self.samples.len() / self.channels.count()
        }

        fn play(&self, mixer: &mut Mixer) {
            mixer.play_audio_clip(self);
        }
    }

    glsp::bind_rfn("AudioClip:load", rfn!(AudioClip::load::<PathBuf>))?;

When a reference to an `rdata!` struct is bound as an `RFn` parameter, that parameter
expects an argument which belongs to the [`rdata` primitive type](struct.RData.html). That
argument is [borrowed](struct.RData.html#method.borrow) for the duration of the function
call.

The `meths` block contains a comma-separated list of `"name": fn_expr` pairs. Each
`fn_expr` is passed to the [`rfn!`](macro.rfn.html) macro, and the resulting
[`WrappedFn`](struct.WrappedFn.html) is bound as a method which can be called by GameLisp
code. Each `"name"` can be prefixed with the keyword `get` or `set` to bind it as a
property getter or property setter, respectively.
*/
#[macro_export]
macro_rules! rdata {
    (
        $(#[$struct_attr:meta])*
        $struct_vis:vis struct $rdata:ident {
            $($struct_token:tt)*
        }

        $($rest:tt)*
    ) => (
        $(#[$struct_attr])*
        $struct_vis struct $rdata {
            $($struct_token)*
        }

        $crate::rdata_impls! { $rdata; $($rest)* }
    );

    (
        $(#[$struct_attr:meta])*
        $struct_vis:vis struct $rdata:ident;

        $($rest:tt)*
    ) => (
        $(#[$struct_attr])*
        $struct_vis struct $rdata;

        $crate::rdata_impls! { $rdata; $($rest)* }
    );

    (
        $(#[$struct_attr:meta])*
        $struct_vis:vis struct $rdata:ident ( $($struct_token:tt)* );

        $($rest:tt)*
    ) => (
        $(#[$struct_attr])*
        $struct_vis struct $rdata ( $($struct_token)* );

        $crate::rdata_impls! { $rdata; $($rest)* }
    );
}

#[doc(hidden)]
#[macro_export]
macro_rules! rdata_impls {
    ($rdata:ident;) => (rdata_impls!($rdata; meths { }););
    (
        $rdata:ident;
        meths {
            $($($meth_kind:ident)? $meth_name:literal : $meth_expr:path,)+
        }
    ) => (
        rdata_impls!(
            $rdata;
            meths {
                $($($meth_kind)? $meth_name: $meth_expr),+
            }
        );
    );

    (
        $rdata:ident;
        meths {
            $($($meth_kind:ident)? $meth_name:literal : $meth_expr:path),*
        }
    ) => (
        impl $crate::RStore for $rdata {
            fn type_name() -> &'static str {
                stringify!($rdata)
            }

            fn size_of() -> usize {
                std::mem::size_of::<$rdata>()
            }

            fn rclass() -> $crate::GResult<$crate::RClass> {
                $crate::RClass::from_vec(
                    stringify!($rdata),
                    std::vec![
                        $((stringify!($($meth_kind)?), $meth_name, $crate::rfn!($meth_expr))),*
                    ]
                )
            }
        }

        impl<'r> $crate::MakeTemp for &'r $rdata {
            type Temp = $crate::RRef<$rdata>;

            #[inline(always)]
            fn make_temp(
                vals: &[$crate::Slot],
                i: usize
            ) -> $crate::GResult<$crate::RRef<$rdata>> {
                match vals[i] {
                    $crate::Slot::RData(ref rdata) => rdata.try_borrow(),
                    ref val => bail!("expected {}, received {}",
                                     <$rdata as $crate::RStore>::type_name(),
                                     val.a_type_name())
                }
            }
        }

        impl<'a: 'r, 'r> $crate::MakeArg<'a> for &'r $rdata {
            #[inline(always)]
            fn make_arg(temp: &'a mut $crate::RRef<$rdata>) -> $crate::GResult<&'r $rdata> {
                Ok(&**temp)
            }
        }

        impl<'r> $crate::MakeTemp for &'r mut $rdata {
            type Temp = $crate::RRefMut<$rdata>;

            #[inline(always)]
            fn make_temp(
                vals: &[$crate::Slot],
                i: usize
            ) -> $crate::GResult<$crate::RRefMut<$rdata>> {
                match vals[i] {
                    $crate::Slot::RData(ref rdata) => rdata.try_borrow_mut(),
                    ref val => bail!("expected {}, received {}",
                                     <$rdata as $crate::RStore>::type_name(),
                                     val.a_type_name())
                }
            }
        }

        impl<'a: 'r, 'r> $crate::MakeArg<'a> for &'r mut $rdata {
            #[inline(always)]
            fn make_arg(
                temp: &'a mut $crate::RRefMut<$rdata>
            ) -> $crate::GResult<&'r mut $rdata> {
                Ok(&mut **temp)
            }
        }

        impl $crate::IntoResult for $rdata {
            fn into_result(self) -> GResult<$crate::Slot> {
                Ok($crate::Slot::RData($crate::rdata(self)?.into_gc()))
            }
        }
    );
}
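//note the IntoResult impl above: returning an rdata! struct by value from an rfn moves it
//onto the garbage-collected heap via $crate::rdata, so a signature like
//fn load(path: &Path) -> AudioClip hands GameLisp an rdata value.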
//-------------------------------------------------------------------------------------------------
// Callable, CallableOps, ToCallArgs
//-------------------------------------------------------------------------------------------------

/**
A type-erased `callable`.

Because this type implements the [`CallableOps` trait](trait.CallableOps.html), you can
call it directly, without needing to access the underlying types.
*/
#[derive(Clone, Debug)]
pub enum Callable {
    RFn(RFn),
    GFn(Root<GFn>),
    Class(Root<Class>)
}

/**
The `callable` abstract type.

[`glsp:call`](fn.call.html) can be used to call any type which implements this trait.

This trait is [sealed]. It's not possible to implement this trait for your own types.

[sealed]: https://rust-lang.github.io/api-guidelines/future-proofing.html#sealed-traits-protect-against-downstream-implementations-c-sealed
*/
pub trait CallableOps: callable_ops_private::Sealed {
    #[doc(hidden)]
    fn receive_call(&self, arg_count: usize) -> GResult<Val>;

    ///Returns this function's registered name, if any.
    fn name(&self) -> Option<Sym>;

    ///Returns this function's minimum and maximum argument count.
    fn arg_limits(&self) -> (usize, Option<usize>);

    ///Returns this function's minimum argument count.
    fn min_args(&self) -> usize {
        self.arg_limits().0
    }

    ///Returns this function's maximum argument count, if any.
    fn max_args(&self) -> Option<usize> {
        self.arg_limits().1
    }
}

mod callable_ops_private {
    use crate::{class::Class, code::GFn, engine::RFn, gc::{Gc, Root}, wrap::Callable};

    pub trait Sealed { }

    impl Sealed for Callable { }
    impl Sealed for RFn { }
    impl Sealed for Root<Class> { }
    impl Sealed for Gc<Class> { }
    impl Sealed for Root<GFn> { }
    impl Sealed for Gc<GFn> { }
}

impl CallableOps for Callable {
    fn receive_call(&self, arg_count: usize) -> GResult<Val> {
        match *self {
            Callable::RFn(rfn) => glsp::call_rfn(rfn, arg_count).map(|slot| slot.root()),
            Callable::GFn(ref gfn_root) => glsp::call_gfn(gfn_root, arg_count),
            Callable::Class(ref class_root) => {
                Ok(Val::Obj(glsp::call_class(class_root, arg_count)?))
            }
        }
    }

    fn arg_limits(&self) -> (usize, Option<usize>) {
        match *self {
            Callable::RFn(rfn) => rfn.arg_limits(),
            Callable::GFn(ref gfn_root) => gfn_root.arg_limits(),
            Callable::Class(ref class_root) => class_root.arg_limits()
        }
    }

    fn name(&self) -> Option<Sym> {
        match *self {
            Callable::RFn(rfn) => rfn.name(),
            Callable::GFn(ref gfn_root) => gfn_root.name(),
            Callable::Class(ref class_root) => class_root.name()
        }
    }
}

/**
A type which can be converted into the arguments to a function call.

It's not possible to implement this trait for your own types, but it's implemented for
tuples and vectors of various sizes, when their elements all implement
[`ToVal`](trait.ToVal.html).

Functions like [`glsp:call`](fn.call.html) and [`Obj::call`](struct.Obj.html#method.call)
are generic over this trait. They usually define their arguments as `&T where T: ToCallArgs`,
so tuples of arguments will need to be passed by reference:

    let push_rfn: RFn = glsp::global("push!")?;
    glsp::call(&push_rfn, &(my_arr, 100i32))?;
*/
pub trait ToCallArgs: to_call_args_private::Sealed {
    fn arg_count(&self) -> usize;
    fn to_call_args<E: Extend<Slot>>(&self, dst: &mut E) -> GResult<()>;
}

mod to_call_args_private {
    use crate::{wrap::ToVal};

    pub trait Sealed { }

    impl<T: ToVal> Sealed for [T] { }
    impl<T> Sealed for [T; 0] { }
    impl Sealed for () { }
}

impl<T: ToVal> ToCallArgs for [T] {
    fn arg_count(&self) -> usize {
        self.len()
    }

    fn to_call_args<E: Extend<Slot>>(&self, dst: &mut E) -> GResult<()> {
        let mut result = Ok(());
        dst.extend(self.iter().map(|item| {
            match item.to_slot() {
                Ok(slot) => slot,
                Err(err) => {
                    result = Err(err);
                    Slot::Nil
                }
            }
        }));
        result
    }
}

impl<T> ToCallArgs for [T; 0] {
    fn arg_count(&self) -> usize {
        0
    }

    fn to_call_args<E: Extend<Slot>>(&self, _dst: &mut E) -> GResult<()> {
        Ok(())
    }
}

macro_rules! impl_to_call_args_array {
    ($len:literal) => (
        impl<T: ToVal> to_call_args_private::Sealed for [T; $len] { }

        impl<T: ToVal> ToCallArgs for [T; $len] {
            fn arg_count(&self) -> usize {
                $len
            }

            fn to_call_args<E: Extend<Slot>>(&self, dst: &mut E) -> GResult<()> {
                let mut result = Ok(());
                dst.extend(self.iter().map(|item| {
                    match item.to_slot() {
                        Ok(slot) => slot,
                        Err(err) => {
                            result = Err(err);
                            Slot::Nil
                        }
                    }
                }));
                result
            }
        }
    )
}

impl_to_call_args_array!(1);
impl_to_call_args_array!(2);
impl_to_call_args_array!(3);
impl_to_call_args_array!(4);
impl_to_call_args_array!(5);
impl_to_call_args_array!(6);
impl_to_call_args_array!(7);
impl_to_call_args_array!(8);
impl_to_call_args_array!(9);
impl_to_call_args_array!(10);
impl_to_call_args_array!(11);
impl_to_call_args_array!(12);

impl ToCallArgs for () {
    fn arg_count(&self) -> usize {
        0
    }

    fn to_call_args<E: Extend<Slot>>(&self, _dst: &mut E) -> GResult<()> {
        Ok(())
    }
}
impl_to_call_args_tuple { ($len:literal: $($t:ident $i:tt),+) => ( impl<$($t),+> to_call_args_private::Sealed for ($($t,)+) where $( $t: ToVal ),+ { } impl<$($t),+> ToCallArgs for ($($t,)+) where $( $t: ToVal ),+ { fn arg_count(&self) -> usize { $len } fn to_call_args<EE: Extend<Slot>>(&self, dst: &mut EE) -> GResult<()> { let slots = [ $( (self.$i).to_slot()? ),+ ]; dst.extend(slots.iter().cloned()); Ok(()) } } ); } impl_to_call_args_tuple!( 1: A 0); impl_to_call_args_tuple!( 2: A 0, B 1); impl_to_call_args_tuple!( 3: A 0, B 1, C 2); impl_to_call_args_tuple!( 4: A 0, B 1, C 2, D 3); impl_to_call_args_tuple!( 5: A 0, B 1, C 2, D 3, E 4); impl_to_call_args_tuple!( 6: A 0, B 1, C 2, D 3, E 4, F 5); impl_to_call_args_tuple!( 7: A 0, B 1, C 2, D 3, E 4, F 5, G 6); impl_to_call_args_tuple!( 8: A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7); impl_to_call_args_tuple!( 9: A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8); impl_to_call_args_tuple!(10: A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9); impl_to_call_args_tuple!(11: A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10); impl_to_call_args_tuple!(12: A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11);
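// A brief usage sketch for the conversions above (an illustration, not part of the original
// module). It assumes an active GameLisp runtime, an existing `my_arr: Root<Arr>` binding,
// and an enclosing function that returns `GResult`, since the `glsp` calls below are fallible:
//
//     let push_rfn: RFn = glsp::global("push!")?;
//
//     // tuples implement ToCallArgs, so arguments are passed as a tuple by reference
//     let _: Val = glsp::call(&push_rfn, &(my_arr, 100i32))?;
//
//     // CallableOps works uniformly across RFn, GFn and Class
//     let callable = Callable::RFn(push_rfn);
//     let (min_args, max_args) = callable.arg_limits();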
avg_line_length: 26.452252
max_line_length: 140
alphanum_fraction: 0.544004

hexsha: 1d0fdced46c52aec474f0db64bc91e3ddc668c04
size: 4,190
use crate::alloc::{GlobalAlloc, Layout, System};
use crate::ptr;
// use crate::sys_common::alloc::{realloc_fallback, MIN_ALIGN};

use super::vm_allocator::GLOBAL_VM_ALLOC;

#[stable(feature = "alloc_system_type", since = "1.28.0")]
unsafe impl GlobalAlloc for System {
    #[inline]
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // jemalloc provides alignment less than MIN_ALIGN for small allocations.
        // So only rely on MIN_ALIGN if size >= align.
        // Also see <https://github.com/rust-lang/rust/issues/45955> and
        // <https://github.com/rust-lang/rust/issues/62251#issuecomment-507580914>.
        // if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() {
        //     libc::malloc(layout.size()) as *mut u8
        // } else {
        //     #[cfg(target_os = "macos")]
        //     {
        //         if layout.align() > (1 << 31) {
        //             return ptr::null_mut();
        //         }
        //     }
        //     aligned_malloc(&layout)
        // }
        GLOBAL_VM_ALLOC.alloc(layout)
    }

    // #[inline]
    // unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
    //     // See the comment above in `alloc` for why this check looks the way it does.
    //     // if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() {
    //     //     libc::calloc(layout.size(), 1) as *mut u8
    //     // } else {
    //     //     let ptr = self.alloc(layout);
    //     //     if !ptr.is_null() {
    //     //         ptr::write_bytes(ptr, 0, layout.size());
    //     //     }
    //     //     ptr
    //     // }
    // }

    #[inline]
    unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
        // libc::free(ptr as *mut libc::c_void)
        GLOBAL_VM_ALLOC.dealloc(ptr, _layout)
    }

    // #[inline]
    // unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
    //     // if layout.align() <= MIN_ALIGN && layout.align() <= new_size {
    //     //     libc::realloc(ptr as *mut libc::c_void, new_size) as *mut u8
    //     // } else {
    //     //     realloc_fallback(self, ptr, layout, new_size)
    //     // }
    // }
}

// #[cfg(any(
//     target_os = "android",
//     target_os = "illumos",
//     target_os = "redox",
//     target_os = "solaris"
// ))]
// #[inline]
// unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 {
//     // On android we currently target API level 9 which unfortunately
//     // doesn't have the `posix_memalign` API used below. Instead we use
//     // `memalign`, but this unfortunately has the property on some systems
//     // where the memory returned cannot be deallocated by `free`!
//     //
//     // Upon closer inspection, however, this appears to work just fine with
//     // Android, so for this platform we should be fine to call `memalign`
//     // (which is present in API level 9). Some helpful references could
//     // possibly be chromium using memalign [1], attempts at documenting that
//     // memalign + free is ok [2] [3], or the current source of chromium
//     // which still uses memalign on android [4].
//     //
//     // [1]: https://codereview.chromium.org/10796020/
//     // [2]: https://code.google.com/p/android/issues/detail?id=35391
//     // [3]: https://bugs.chromium.org/p/chromium/issues/detail?id=138579
//     // [4]: https://chromium.googlesource.com/chromium/src/base/+/master/
//     //      /memory/aligned_memory.cc
//     libc::memalign(layout.align(), layout.size()) as *mut u8
// }

// #[cfg(not(any(
//     target_os = "android",
//     target_os = "illumos",
//     target_os = "redox",
//     target_os = "solaris"
// )))]
// #[inline]
// unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 {
//     let mut out = ptr::null_mut();
//     // posix_memalign requires that the alignment be a multiple of `sizeof(void*)`.
//     // Since these are all powers of 2, we can just use max.
//     let align = layout.align().max(crate::mem::size_of::<usize>());
//     let ret = libc::posix_memalign(&mut out, align, layout.size());
//     if ret != 0 { ptr::null_mut() } else { out as *mut u8 }
// }
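// For comparison, a minimal user-level sketch of the same delegation pattern (an illustration
// only, not part of this std port): any type implementing `GlobalAlloc` can be installed as
// the process-wide allocator via `#[global_allocator]`.
//
//     use std::alloc::{GlobalAlloc, Layout, System};
//
//     struct ForwardingAlloc;
//
//     unsafe impl GlobalAlloc for ForwardingAlloc {
//         unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
//             // delegate to the system allocator, as this file delegates to GLOBAL_VM_ALLOC
//             System.alloc(layout)
//         }
//
//         unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
//             System.dealloc(ptr, layout)
//         }
//     }
//
//     #[global_allocator]
//     static GLOBAL: ForwardingAlloc = ForwardingAlloc;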
avg_line_length: 41.485149
max_line_length: 91
alphanum_fraction: 0.561814

hexsha: 2fc558d6bf33910db09713356374234e47a146b3
size: 309,977
//! The `rpc` module implements the Solana RPC interface. use { crate::{ max_slots::MaxSlots, optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank, parsed_token_accounts::*, rpc_health::*, }, bincode::{config::Options, serialize}, jsonrpc_core::{futures::future, types::error, BoxFuture, Error, Metadata, Result}, jsonrpc_derive::rpc, serde::{Deserialize, Serialize}, solana_account_decoder::{ parse_token::{spl_token_id, token_amount_to_ui_amount, UiTokenAmount}, UiAccount, UiAccountEncoding, UiDataSliceConfig, MAX_BASE58_BYTES, }, solana_client::{ rpc_cache::LargestAccountsCache, rpc_config::*, rpc_custom_error::RpcCustomError, rpc_deprecated_config::*, rpc_filter::{Memcmp, MemcmpEncodedBytes, RpcFilterType}, rpc_request::{ TokenAccountsFilter, DELINQUENT_VALIDATOR_SLOT_DISTANCE, MAX_GET_CONFIRMED_BLOCKS_RANGE, MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT, MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE, MAX_GET_PROGRAM_ACCOUNT_FILTERS, MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS, MAX_GET_SLOT_LEADERS, MAX_MULTIPLE_ACCOUNTS, NUM_LARGEST_ACCOUNTS, }, rpc_response::Response as RpcResponse, rpc_response::*, }, solana_faucet::faucet::request_airdrop_transaction, solana_gossip::{cluster_info::ClusterInfo, contact_info::ContactInfo}, solana_ledger::{ blockstore::Blockstore, blockstore_db::BlockstoreError, get_tmp_ledger_path, leader_schedule_cache::LeaderScheduleCache, }, solana_metrics::inc_new_counter_info, solana_perf::packet::PACKET_DATA_SIZE, solana_runtime::{ accounts::AccountAddressFilter, accounts_index::{AccountIndex, AccountSecondaryIndexes, IndexKey, ScanConfig}, bank::{Bank, TransactionSimulationResult}, bank_forks::BankForks, commitment::{BlockCommitmentArray, BlockCommitmentCache, CommitmentSlots}, inline_spl_token::{SPL_TOKEN_ACCOUNT_MINT_OFFSET, SPL_TOKEN_ACCOUNT_OWNER_OFFSET}, non_circulating_supply::calculate_non_circulating_supply, snapshot_config::SnapshotConfig, snapshot_utils, }, solana_sdk::{ account::{AccountSharedData, ReadableAccount}, account_utils::StateMut, clock::{Slot, UnixTimestamp, MAX_RECENT_BLOCKHASHES}, commitment_config::{CommitmentConfig, CommitmentLevel}, epoch_info::EpochInfo, epoch_schedule::EpochSchedule, exit::Exit, feature_set::{self, nonce_must_be_writable}, fee_calculator::FeeCalculator, hash::Hash, message::{Message, SanitizedMessage}, pubkey::{Pubkey, PUBKEY_BYTES}, signature::{Keypair, Signature, Signer}, stake::state::{StakeActivationStatus, StakeState}, stake_history::StakeHistory, system_instruction, sysvar::stake_history, transaction::{self, SanitizedTransaction, TransactionError, VersionedTransaction}, }, solana_send_transaction_service::{ send_transaction_service::{SendTransactionService, TransactionInfo}, tpu_info::NullTpuInfo, }, solana_streamer::socket::SocketAddrSpace, solana_transaction_status::{ ConfirmedBlock, EncodedConfirmedTransaction, Reward, RewardType, TransactionConfirmationStatus, TransactionStatus, UiConfirmedBlock, UiTransactionEncoding, }, solana_vote_program::vote_state::{VoteState, MAX_LOCKOUT_HISTORY}, spl_token::{ solana_program::program_pack::Pack, state::{Account as TokenAccount, Mint}, }, std::{ any::type_name, cmp::{max, min}, collections::{HashMap, HashSet}, convert::TryFrom, net::SocketAddr, str::FromStr, sync::{ atomic::{AtomicBool, AtomicU64, Ordering}, mpsc::{channel, Receiver, Sender}, Arc, Mutex, RwLock, }, time::Duration, }, }; type RpcCustomResult<T> = std::result::Result<T, RpcCustomError>; pub const MAX_REQUEST_PAYLOAD_SIZE: usize = 50 * (1 << 10); // 50kB pub const 
PERFORMANCE_SAMPLES_LIMIT: usize = 720; // Limit the length of the `epoch_credits` array for each validator in a `get_vote_accounts` // response const MAX_RPC_EPOCH_CREDITS_HISTORY: usize = 5; fn new_response<T>(bank: &Bank, value: T) -> RpcResponse<T> { let context = RpcResponseContext { slot: bank.slot() }; Response { context, value } } /// Wrapper for rpc return types of methods that provide responses both with and without context. /// Main purpose of this is to fix methods that lack context information in their return type, /// without breaking backwards compatibility. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(untagged)] pub enum OptionalContext<T> { Context(RpcResponse<T>), NoContext(T), } fn is_finalized( block_commitment_cache: &BlockCommitmentCache, bank: &Bank, blockstore: &Blockstore, slot: Slot, ) -> bool { slot <= block_commitment_cache.highest_confirmed_root() && (blockstore.is_root(slot) || bank.status_cache_ancestors().contains(&slot)) } #[derive(Debug, Default, Clone)] pub struct JsonRpcConfig { pub enable_rpc_transaction_history: bool, pub enable_cpi_and_log_storage: bool, pub faucet_addr: Option<SocketAddr>, pub health_check_slot_distance: u64, pub enable_bigtable_ledger_storage: bool, pub enable_bigtable_ledger_upload: bool, pub max_multiple_accounts: Option<usize>, pub account_indexes: AccountSecondaryIndexes, pub rpc_threads: usize, pub rpc_niceness_adj: i8, pub rpc_bigtable_timeout: Option<Duration>, pub minimal_api: bool, pub obsolete_v1_7_api: bool, pub rpc_scan_and_fix_roots: bool, } #[derive(Clone)] pub struct JsonRpcRequestProcessor { bank_forks: Arc<RwLock<BankForks>>, block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>, blockstore: Arc<Blockstore>, config: JsonRpcConfig, snapshot_config: Option<SnapshotConfig>, #[allow(dead_code)] validator_exit: Arc<RwLock<Exit>>, health: Arc<RpcHealth>, cluster_info: Arc<ClusterInfo>, genesis_hash: Hash, transaction_sender: Arc<Mutex<Sender<TransactionInfo>>>, bigtable_ledger_storage: Option<solana_storage_bigtable::LedgerStorage>, optimistically_confirmed_bank: Arc<RwLock<OptimisticallyConfirmedBank>>, largest_accounts_cache: Arc<RwLock<LargestAccountsCache>>, max_slots: Arc<MaxSlots>, leader_schedule_cache: Arc<LeaderScheduleCache>, max_complete_transaction_status_slot: Arc<AtomicU64>, } impl Metadata for JsonRpcRequestProcessor {} impl JsonRpcRequestProcessor { #[allow(deprecated)] fn bank(&self, commitment: Option<CommitmentConfig>) -> Arc<Bank> { debug!("RPC commitment_config: {:?}", commitment); let r_bank_forks = self.bank_forks.read().unwrap(); let commitment = commitment.unwrap_or_default(); if commitment.is_confirmed() { let bank = self .optimistically_confirmed_bank .read() .unwrap() .bank .clone(); debug!("RPC using optimistically confirmed slot: {:?}", bank.slot()); return bank; } let slot = self .block_commitment_cache .read() .unwrap() .slot_with_commitment(commitment.commitment); match commitment.commitment { // Recent variant is deprecated CommitmentLevel::Recent | CommitmentLevel::Processed => { debug!("RPC using the heaviest slot: {:?}", slot); } // Root variant is deprecated CommitmentLevel::Root => { debug!("RPC using node root: {:?}", slot); } // Single variant is deprecated CommitmentLevel::Single => { debug!("RPC using confirmed slot: {:?}", slot); } // Max variant is deprecated CommitmentLevel::Max | CommitmentLevel::Finalized => { debug!("RPC using block: {:?}", slot); } CommitmentLevel::SingleGossip | CommitmentLevel::Confirmed => unreachable!(), // SingleGossip 
variant is deprecated }; r_bank_forks.get(slot).cloned().unwrap_or_else(|| { // We log a warning instead of returning an error, because all known error cases // are due to known bugs that should be fixed instead. // // The slot may not be found as a result of a known bug in snapshot creation, where // the bank at the given slot was not included in the snapshot. // Also, it may occur after an old bank has been purged from BankForks and a new // BlockCommitmentCache has not yet arrived. To make this case impossible, // BlockCommitmentCache should hold an `Arc<Bank>` everywhere it currently holds // a slot. // // For more information, see https://github.com/solana-labs/solana/issues/11078 warn!( "Bank with {:?} not found at slot: {:?}", commitment.commitment, slot ); r_bank_forks.root_bank() }) } fn genesis_creation_time(&self) -> UnixTimestamp { self.bank(None).genesis_creation_time() } #[allow(clippy::too_many_arguments)] pub fn new( config: JsonRpcConfig, snapshot_config: Option<SnapshotConfig>, bank_forks: Arc<RwLock<BankForks>>, block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>, blockstore: Arc<Blockstore>, validator_exit: Arc<RwLock<Exit>>, health: Arc<RpcHealth>, cluster_info: Arc<ClusterInfo>, genesis_hash: Hash, bigtable_ledger_storage: Option<solana_storage_bigtable::LedgerStorage>, optimistically_confirmed_bank: Arc<RwLock<OptimisticallyConfirmedBank>>, largest_accounts_cache: Arc<RwLock<LargestAccountsCache>>, max_slots: Arc<MaxSlots>, leader_schedule_cache: Arc<LeaderScheduleCache>, max_complete_transaction_status_slot: Arc<AtomicU64>, ) -> (Self, Receiver<TransactionInfo>) { let (sender, receiver) = channel(); ( Self { config, snapshot_config, bank_forks, block_commitment_cache, blockstore, validator_exit, health, cluster_info, genesis_hash, transaction_sender: Arc::new(Mutex::new(sender)), bigtable_ledger_storage, optimistically_confirmed_bank, largest_accounts_cache, max_slots, leader_schedule_cache, max_complete_transaction_status_slot, }, receiver, ) } // Useful for unit testing pub fn new_from_bank(bank: &Arc<Bank>, socket_addr_space: SocketAddrSpace) -> Self { let genesis_hash = bank.hash(); let bank_forks = Arc::new(RwLock::new(BankForks::new_from_banks( &[bank.clone()], bank.slot(), ))); let blockstore = Arc::new(Blockstore::open(&get_tmp_ledger_path!()).unwrap()); let exit = Arc::new(AtomicBool::new(false)); let cluster_info = Arc::new(ClusterInfo::new( ContactInfo::default(), Arc::new(Keypair::new()), socket_addr_space, )); let tpu_address = cluster_info.my_contact_info().tpu; let (sender, receiver) = channel(); SendTransactionService::new::<NullTpuInfo>( tpu_address, &bank_forks, None, receiver, 1000, 1, ); Self { config: JsonRpcConfig::default(), snapshot_config: None, bank_forks, block_commitment_cache: Arc::new(RwLock::new(BlockCommitmentCache::new( HashMap::new(), 0, CommitmentSlots::new_from_slot(bank.slot()), ))), blockstore, validator_exit: create_validator_exit(&exit), health: Arc::new(RpcHealth::new(cluster_info.clone(), None, 0, exit.clone())), cluster_info, genesis_hash, transaction_sender: Arc::new(Mutex::new(sender)), bigtable_ledger_storage: None, optimistically_confirmed_bank: Arc::new(RwLock::new(OptimisticallyConfirmedBank { bank: bank.clone(), })), largest_accounts_cache: Arc::new(RwLock::new(LargestAccountsCache::new(30))), max_slots: Arc::new(MaxSlots::default()), leader_schedule_cache: Arc::new(LeaderScheduleCache::new_from_bank(bank)), max_complete_transaction_status_slot: Arc::new(AtomicU64::default()), } } pub fn get_account_info( 
&self, pubkey: &Pubkey, config: Option<RpcAccountInfoConfig>, ) -> Result<RpcResponse<Option<UiAccount>>> { let config = config.unwrap_or_default(); let bank = self.bank(config.commitment); let encoding = config.encoding.unwrap_or(UiAccountEncoding::Binary); check_slice_and_encoding(&encoding, config.data_slice.is_some())?; let response = get_encoded_account(&bank, pubkey, encoding, config.data_slice)?; Ok(new_response(&bank, response)) } pub fn get_multiple_accounts( &self, pubkeys: Vec<Pubkey>, config: Option<RpcAccountInfoConfig>, ) -> Result<RpcResponse<Vec<Option<UiAccount>>>> { let config = config.unwrap_or_default(); let bank = self.bank(config.commitment); let encoding = config.encoding.unwrap_or(UiAccountEncoding::Base64); check_slice_and_encoding(&encoding, config.data_slice.is_some())?; let accounts = pubkeys .into_iter() .map(|pubkey| get_encoded_account(&bank, &pubkey, encoding, config.data_slice)) .collect::<Result<Vec<_>>>()?; Ok(new_response(&bank, accounts)) } pub fn get_minimum_balance_for_rent_exemption( &self, data_len: usize, commitment: Option<CommitmentConfig>, ) -> u64 { self.bank(commitment) .get_minimum_balance_for_rent_exemption(data_len) } pub fn get_program_accounts( &self, program_id: &Pubkey, config: Option<RpcAccountInfoConfig>, mut filters: Vec<RpcFilterType>, with_context: bool, ) -> Result<OptionalContext<Vec<RpcKeyedAccount>>> { let config = config.unwrap_or_default(); let bank = self.bank(config.commitment); let encoding = config.encoding.unwrap_or(UiAccountEncoding::Binary); let data_slice_config = config.data_slice; check_slice_and_encoding(&encoding, data_slice_config.is_some())?; optimize_filters(&mut filters); let keyed_accounts = { if let Some(owner) = get_spl_token_owner_filter(program_id, &filters) { self.get_filtered_spl_token_accounts_by_owner(&bank, &owner, filters)? } else if let Some(mint) = get_spl_token_mint_filter(program_id, &filters) { self.get_filtered_spl_token_accounts_by_mint(&bank, &mint, filters)? } else { self.get_filtered_program_accounts(&bank, program_id, filters)? } }; let result = if program_id == &spl_token_id() && encoding == UiAccountEncoding::JsonParsed { get_parsed_token_accounts(bank.clone(), keyed_accounts.into_iter()).collect() } else { keyed_accounts .into_iter() .map(|(pubkey, account)| { Ok(RpcKeyedAccount { pubkey: pubkey.to_string(), account: encode_account(&account, &pubkey, encoding, data_slice_config)?, }) }) .collect::<Result<Vec<_>>>()? 
}; Ok(result).map(|result| match with_context { true => OptionalContext::Context(new_response(&bank, result)), false => OptionalContext::NoContext(result), }) } pub async fn get_inflation_reward( &self, addresses: Vec<Pubkey>, config: Option<RpcEpochConfig>, ) -> Result<Vec<Option<RpcInflationReward>>> { let config = config.unwrap_or_default(); let epoch_schedule = self.get_epoch_schedule(); let first_available_block = self.get_first_available_block().await; let epoch = config.epoch.unwrap_or_else(|| { epoch_schedule .get_epoch(self.get_slot(config.commitment)) .saturating_sub(1) }); // Rewards for this epoch are found in the first confirmed block of the next epoch let first_slot_in_epoch = epoch_schedule.get_first_slot_in_epoch(epoch.saturating_add(1)); if first_slot_in_epoch < first_available_block { if self.bigtable_ledger_storage.is_some() { return Err(RpcCustomError::LongTermStorageSlotSkipped { slot: first_slot_in_epoch, } .into()); } else { return Err(RpcCustomError::BlockCleanedUp { slot: first_slot_in_epoch, first_available_block, } .into()); } } let first_confirmed_block_in_epoch = *self .get_blocks_with_limit(first_slot_in_epoch, 1, config.commitment) .await? .get(0) .ok_or(RpcCustomError::BlockNotAvailable { slot: first_slot_in_epoch, })?; let first_confirmed_block = if let Ok(Some(first_confirmed_block)) = self .get_block( first_confirmed_block_in_epoch, Some(RpcBlockConfig::rewards_with_commitment(config.commitment).into()), ) .await { first_confirmed_block } else { return Err(RpcCustomError::BlockNotAvailable { slot: first_confirmed_block_in_epoch, } .into()); }; let addresses: Vec<String> = addresses .into_iter() .map(|pubkey| pubkey.to_string()) .collect(); let reward_hash: HashMap<String, Reward> = first_confirmed_block .rewards .unwrap_or_default() .into_iter() .filter_map(|reward| match reward.reward_type? 
{ RewardType::Staking | RewardType::Voting => addresses .contains(&reward.pubkey) .then(|| (reward.clone().pubkey, reward)), _ => None, }) .collect(); let rewards = addresses .iter() .map(|address| { if let Some(reward) = reward_hash.get(address) { return Some(RpcInflationReward { epoch, effective_slot: first_confirmed_block_in_epoch, amount: reward.lamports.abs() as u64, post_balance: reward.post_balance, commission: reward.commission, }); } None }) .collect(); Ok(rewards) } pub fn get_inflation_governor( &self, commitment: Option<CommitmentConfig>, ) -> RpcInflationGovernor { self.bank(commitment).inflation().into() } pub fn get_inflation_rate(&self) -> RpcInflationRate { let bank = self.bank(None); let epoch = bank.epoch(); let inflation = bank.inflation(); let slot_in_year = bank.slot_in_year_for_inflation(); RpcInflationRate { total: inflation.total(slot_in_year), validator: inflation.validator(slot_in_year), foundation: inflation.foundation(slot_in_year), epoch, } } pub fn get_epoch_schedule(&self) -> EpochSchedule { // Since epoch schedule data comes from the genesis config, any commitment level should be // fine let bank = self.bank(Some(CommitmentConfig::finalized())); *bank.epoch_schedule() } pub fn get_balance( &self, pubkey: &Pubkey, commitment: Option<CommitmentConfig>, ) -> RpcResponse<u64> { let bank = self.bank(commitment); new_response(&bank, bank.get_balance(pubkey)) } fn get_recent_blockhash( &self, commitment: Option<CommitmentConfig>, ) -> RpcResponse<RpcBlockhashFeeCalculator> { let bank = self.bank(commitment); let blockhash = bank.confirmed_last_blockhash(); let lamports_per_signature = bank .get_lamports_per_signature_for_blockhash(&blockhash) .unwrap(); new_response( &bank, RpcBlockhashFeeCalculator { blockhash: blockhash.to_string(), fee_calculator: FeeCalculator::new(lamports_per_signature), }, ) } fn get_fees(&self, commitment: Option<CommitmentConfig>) -> RpcResponse<RpcFees> { let bank = self.bank(commitment); let blockhash = bank.confirmed_last_blockhash(); let lamports_per_signature = bank .get_lamports_per_signature_for_blockhash(&blockhash) .unwrap(); #[allow(deprecated)] let last_valid_slot = bank .get_blockhash_last_valid_slot(&blockhash) .expect("bank blockhash queue should contain blockhash"); let last_valid_block_height = bank .get_blockhash_last_valid_block_height(&blockhash) .expect("bank blockhash queue should contain blockhash"); new_response( &bank, RpcFees { blockhash: blockhash.to_string(), fee_calculator: FeeCalculator::new(lamports_per_signature), last_valid_slot, last_valid_block_height, }, ) } fn get_fee_calculator_for_blockhash( &self, blockhash: &Hash, commitment: Option<CommitmentConfig>, ) -> RpcResponse<Option<RpcFeeCalculator>> { let bank = self.bank(commitment); let lamports_per_signature = bank.get_lamports_per_signature_for_blockhash(blockhash); new_response( &bank, lamports_per_signature.map(|lamports_per_signature| RpcFeeCalculator { fee_calculator: FeeCalculator::new(lamports_per_signature), }), ) } fn get_fee_rate_governor(&self) -> RpcResponse<RpcFeeRateGovernor> { let bank = self.bank(None); #[allow(deprecated)] let fee_rate_governor = bank.get_fee_rate_governor(); new_response( &bank, RpcFeeRateGovernor { fee_rate_governor: fee_rate_governor.clone(), }, ) } pub fn confirm_transaction( &self, signature: &Signature, commitment: Option<CommitmentConfig>, ) -> RpcResponse<bool> { let bank = self.bank(commitment); let status = bank.get_signature_status(signature); match status { Some(status) => new_response(&bank, 
status.is_ok()), None => new_response(&bank, false), } } fn get_block_commitment(&self, block: Slot) -> RpcBlockCommitment<BlockCommitmentArray> { let r_block_commitment = self.block_commitment_cache.read().unwrap(); RpcBlockCommitment { commitment: r_block_commitment .get_block_commitment(block) .map(|block_commitment| block_commitment.commitment), total_stake: r_block_commitment.total_stake(), } } fn get_slot(&self, commitment: Option<CommitmentConfig>) -> Slot { self.bank(commitment).slot() } fn get_block_height(&self, commitment: Option<CommitmentConfig>) -> u64 { self.bank(commitment).block_height() } fn get_max_retransmit_slot(&self) -> Slot { self.max_slots.retransmit.load(Ordering::Relaxed) } fn get_max_shred_insert_slot(&self) -> Slot { self.max_slots.shred_insert.load(Ordering::Relaxed) } fn get_slot_leader(&self, commitment: Option<CommitmentConfig>) -> String { self.bank(commitment).collector_id().to_string() } fn get_slot_leaders( &self, commitment: Option<CommitmentConfig>, start_slot: Slot, limit: usize, ) -> Result<Vec<Pubkey>> { let bank = self.bank(commitment); let (mut epoch, mut slot_index) = bank.epoch_schedule().get_epoch_and_slot_index(start_slot); let mut slot_leaders = Vec::with_capacity(limit); while slot_leaders.len() < limit { if let Some(leader_schedule) = self.leader_schedule_cache.get_epoch_leader_schedule(epoch) { slot_leaders.extend( leader_schedule .get_slot_leaders() .iter() .skip(slot_index as usize) .take(limit.saturating_sub(slot_leaders.len())), ); } else { return Err(Error::invalid_params(format!( "Invalid slot range: leader schedule for epoch {} is unavailable", epoch ))); } epoch += 1; slot_index = 0; } Ok(slot_leaders) } fn minimum_ledger_slot(&self) -> Result<Slot> { match self.blockstore.slot_meta_iterator(0) { Ok(mut metas) => match metas.next() { Some((slot, _meta)) => Ok(slot), None => Err(Error::invalid_request()), }, Err(err) => { warn!("slot_meta_iterator failed: {:?}", err); Err(Error::invalid_request()) } } } fn get_transaction_count(&self, commitment: Option<CommitmentConfig>) -> u64 { self.bank(commitment).transaction_count() as u64 } fn get_total_supply(&self, commitment: Option<CommitmentConfig>) -> u64 { self.bank(commitment).capitalization() } fn get_cached_largest_accounts( &self, filter: &Option<RpcLargestAccountsFilter>, ) -> Option<(u64, Vec<RpcAccountBalance>)> { let largest_accounts_cache = self.largest_accounts_cache.read().unwrap(); largest_accounts_cache.get_largest_accounts(filter) } fn set_cached_largest_accounts( &self, filter: &Option<RpcLargestAccountsFilter>, slot: u64, accounts: &[RpcAccountBalance], ) { let mut largest_accounts_cache = self.largest_accounts_cache.write().unwrap(); largest_accounts_cache.set_largest_accounts(filter, slot, accounts) } fn get_largest_accounts( &self, config: Option<RpcLargestAccountsConfig>, ) -> RpcCustomResult<RpcResponse<Vec<RpcAccountBalance>>> { let config = config.unwrap_or_default(); let bank = self.bank(config.commitment); if let Some((slot, accounts)) = self.get_cached_largest_accounts(&config.filter) { Ok(Response { context: RpcResponseContext { slot }, value: accounts, }) } else { let (addresses, address_filter) = if let Some(filter) = config.clone().filter { let non_circulating_supply = calculate_non_circulating_supply(&bank).map_err(|e| { RpcCustomError::ScanError { message: e.to_string(), } })?; let addresses = non_circulating_supply.accounts.into_iter().collect(); let address_filter = match filter { RpcLargestAccountsFilter::Circulating => AccountAddressFilter::Exclude, 
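// `Circulating` excludes the non-circulating address set gathered above, while
// `NonCirculating` (next arm) restricts the scan to exactly that set.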
RpcLargestAccountsFilter::NonCirculating => AccountAddressFilter::Include, }; (addresses, address_filter) } else { (HashSet::new(), AccountAddressFilter::Exclude) }; let accounts = bank .get_largest_accounts(NUM_LARGEST_ACCOUNTS, &addresses, address_filter) .map_err(|e| RpcCustomError::ScanError { message: e.to_string(), })? .into_iter() .map(|(address, lamports)| RpcAccountBalance { address: address.to_string(), lamports, }) .collect::<Vec<RpcAccountBalance>>(); self.set_cached_largest_accounts(&config.filter, bank.slot(), &accounts); Ok(new_response(&bank, accounts)) } } fn get_supply( &self, config: Option<RpcSupplyConfig>, ) -> RpcCustomResult<RpcResponse<RpcSupply>> { let config = config.unwrap_or_default(); let bank = self.bank(config.commitment); let non_circulating_supply = calculate_non_circulating_supply(&bank).map_err(|e| RpcCustomError::ScanError { message: e.to_string(), })?; let total_supply = bank.capitalization(); let non_circulating_accounts = if config.exclude_non_circulating_accounts_list { vec![] } else { non_circulating_supply .accounts .iter() .map(|pubkey| pubkey.to_string()) .collect() }; Ok(new_response( &bank, RpcSupply { total: total_supply, circulating: total_supply - non_circulating_supply.lamports, non_circulating: non_circulating_supply.lamports, non_circulating_accounts, }, )) } fn get_vote_accounts( &self, config: Option<RpcGetVoteAccountsConfig>, ) -> Result<RpcVoteAccountStatus> { let config = config.unwrap_or_default(); let filter_by_vote_pubkey = if let Some(ref vote_pubkey) = config.vote_pubkey { Some(verify_pubkey(vote_pubkey)?) } else { None }; let bank = self.bank(config.commitment); let vote_accounts = bank.vote_accounts(); let epoch_vote_accounts = bank .epoch_vote_accounts(bank.get_epoch_and_slot_index(bank.slot()).0) .ok_or_else(Error::invalid_request)?; let default_vote_state = VoteState::default(); let delinquent_validator_slot_distance = config .delinquent_slot_distance .unwrap_or(DELINQUENT_VALIDATOR_SLOT_DISTANCE); let (current_vote_accounts, delinquent_vote_accounts): ( Vec<RpcVoteAccountInfo>, Vec<RpcVoteAccountInfo>, ) = vote_accounts .iter() .filter_map(|(vote_pubkey, (activated_stake, account))| { if let Some(filter_by_vote_pubkey) = filter_by_vote_pubkey { if *vote_pubkey != filter_by_vote_pubkey { return None; } } let vote_state = account.vote_state(); let vote_state = vote_state.as_ref().unwrap_or(&default_vote_state); let last_vote = if let Some(vote) = vote_state.votes.iter().last() { vote.slot } else { 0 }; let epoch_credits = vote_state.epoch_credits(); let epoch_credits = if epoch_credits.len() > MAX_RPC_EPOCH_CREDITS_HISTORY { epoch_credits .iter() .skip(epoch_credits.len() - MAX_RPC_EPOCH_CREDITS_HISTORY) .cloned() .collect() } else { epoch_credits.clone() }; Some(RpcVoteAccountInfo { vote_pubkey: vote_pubkey.to_string(), node_pubkey: vote_state.node_pubkey.to_string(), activated_stake: *activated_stake, commission: vote_state.commission, root_slot: vote_state.root_slot.unwrap_or(0), epoch_credits, epoch_vote_account: epoch_vote_accounts.contains_key(vote_pubkey), last_vote, }) }) .partition(|vote_account_info| { if bank.slot() >= delinquent_validator_slot_distance as u64 { vote_account_info.last_vote > bank.slot() - delinquent_validator_slot_distance as u64 } else { vote_account_info.last_vote > 0 } }); let keep_unstaked_delinquents = config.keep_unstaked_delinquents.unwrap_or_default(); let delinquent_vote_accounts = if !keep_unstaked_delinquents { delinquent_vote_accounts .into_iter() .filter(|vote_account_info| 
vote_account_info.activated_stake > 0) .collect::<Vec<_>>() } else { delinquent_vote_accounts }; Ok(RpcVoteAccountStatus { current: current_vote_accounts, delinquent: delinquent_vote_accounts, }) } fn check_blockstore_root<T>( &self, result: &std::result::Result<T, BlockstoreError>, slot: Slot, ) -> Result<()> where T: std::fmt::Debug, { if result.is_err() { let err = result.as_ref().unwrap_err(); debug!( "check_blockstore_root, slot: {:?}, max root: {:?}, err: {:?}", slot, self.blockstore.max_root(), err ); if slot >= self.blockstore.max_root() { return Err(RpcCustomError::BlockNotAvailable { slot }.into()); } if self.blockstore.is_skipped(slot) { return Err(RpcCustomError::SlotSkipped { slot }.into()); } } Ok(()) } fn check_slot_cleaned_up<T>( &self, result: &std::result::Result<T, BlockstoreError>, slot: Slot, ) -> Result<()> where T: std::fmt::Debug, { if result.is_err() { if let BlockstoreError::SlotCleanedUp = result.as_ref().unwrap_err() { return Err(RpcCustomError::BlockCleanedUp { slot, first_available_block: self .blockstore .get_first_available_block() .unwrap_or_default(), } .into()); } } Ok(()) } fn check_bigtable_result<T>( &self, result: &std::result::Result<T, solana_storage_bigtable::Error>, ) -> Result<()> where T: std::fmt::Debug, { if result.is_err() { let err = result.as_ref().unwrap_err(); if let solana_storage_bigtable::Error::BlockNotFound(slot) = err { return Err(RpcCustomError::LongTermStorageSlotSkipped { slot: *slot }.into()); } } Ok(()) } fn check_status_is_complete(&self, slot: Slot) -> Result<()> { if slot > self .max_complete_transaction_status_slot .load(Ordering::SeqCst) { Err(RpcCustomError::BlockStatusNotAvailableYet { slot }.into()) } else { Ok(()) } } pub async fn get_block( &self, slot: Slot, config: Option<RpcEncodingConfigWrapper<RpcBlockConfig>>, ) -> Result<Option<UiConfirmedBlock>> { if self.config.enable_rpc_transaction_history { let config = config .map(|config| config.convert_to_current()) .unwrap_or_default(); let encoding = config.encoding.unwrap_or(UiTransactionEncoding::Json); let transaction_details = config.transaction_details.unwrap_or_default(); let show_rewards = config.rewards.unwrap_or(true); let commitment = config.commitment.unwrap_or_default(); check_is_at_least_confirmed(commitment)?; // Block is old enough to be finalized if slot <= self .block_commitment_cache .read() .unwrap() .highest_confirmed_root() { self.check_status_is_complete(slot)?; let result = self.blockstore.get_rooted_block(slot, true); self.check_blockstore_root(&result, slot)?; let configure_block = |confirmed_block: ConfirmedBlock| { let mut confirmed_block = confirmed_block.configure(encoding, transaction_details, show_rewards); if slot == 0 { confirmed_block.block_time = Some(self.genesis_creation_time()); confirmed_block.block_height = Some(0); } confirmed_block }; if result.is_err() { if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { let bigtable_result = bigtable_ledger_storage.get_confirmed_block(slot).await; self.check_bigtable_result(&bigtable_result)?; return Ok(bigtable_result.ok().map(configure_block)); } } self.check_slot_cleaned_up(&result, slot)?; return Ok(result.ok().map(configure_block)); } else if commitment.is_confirmed() { // Check if block is confirmed let confirmed_bank = self.bank(Some(CommitmentConfig::confirmed())); if confirmed_bank.status_cache_ancestors().contains(&slot) { self.check_status_is_complete(slot)?; let result = self.blockstore.get_complete_block(slot, true); return Ok(result.ok().map(|mut 
confirmed_block| { if confirmed_block.block_time.is_none() || confirmed_block.block_height.is_none() { let r_bank_forks = self.bank_forks.read().unwrap(); let bank = r_bank_forks.get(slot).cloned(); if let Some(bank) = bank { if confirmed_block.block_time.is_none() { confirmed_block.block_time = Some(bank.clock().unix_timestamp); } if confirmed_block.block_height.is_none() { confirmed_block.block_height = Some(bank.block_height()); } } } confirmed_block.configure(encoding, transaction_details, show_rewards) })); } } } else { return Err(RpcCustomError::TransactionHistoryNotAvailable.into()); } Err(RpcCustomError::BlockNotAvailable { slot }.into()) } pub async fn get_blocks( &self, start_slot: Slot, end_slot: Option<Slot>, commitment: Option<CommitmentConfig>, ) -> Result<Vec<Slot>> { let commitment = commitment.unwrap_or_default(); check_is_at_least_confirmed(commitment)?; let highest_confirmed_root = self .block_commitment_cache .read() .unwrap() .highest_confirmed_root(); let end_slot = min( end_slot.unwrap_or_else(|| start_slot.saturating_add(MAX_GET_CONFIRMED_BLOCKS_RANGE)), if commitment.is_finalized() { highest_confirmed_root } else { self.bank(Some(CommitmentConfig::confirmed())).slot() }, ); if end_slot < start_slot { return Ok(vec![]); } if end_slot - start_slot > MAX_GET_CONFIRMED_BLOCKS_RANGE { return Err(Error::invalid_params(format!( "Slot range too large; max {}", MAX_GET_CONFIRMED_BLOCKS_RANGE ))); } let lowest_blockstore_slot = self.blockstore.lowest_slot(); if start_slot < lowest_blockstore_slot { // If the starting slot is lower than what's available in blockstore assume the entire // [start_slot..end_slot] can be fetched from BigTable. This range should not ever run // into unfinalized confirmed blocks due to MAX_GET_CONFIRMED_BLOCKS_RANGE if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { return bigtable_ledger_storage .get_confirmed_blocks(start_slot, (end_slot - start_slot) as usize + 1) // increment limit by 1 to ensure returned range is inclusive of both start_slot and end_slot .await .map(|mut bigtable_blocks| { bigtable_blocks.retain(|&slot| slot <= end_slot); bigtable_blocks }) .map_err(|_| { Error::invalid_params( "BigTable query failed (maybe timeout due to too large range?)" .to_string(), ) }); } } // Finalized blocks let mut blocks: Vec<_> = self .blockstore .rooted_slot_iterator(max(start_slot, lowest_blockstore_slot)) .map_err(|_| Error::internal_error())? 
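// keep only slots at or below the requested range end that are already finalized
// (i.e. at or below highest_confirmed_root)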
.filter(|&slot| slot <= end_slot && slot <= highest_confirmed_root) .collect(); let last_element = blocks .last() .cloned() .unwrap_or_else(|| start_slot.saturating_sub(1)); // Maybe add confirmed blocks if commitment.is_confirmed() && last_element < end_slot { let confirmed_bank = self.bank(Some(CommitmentConfig::confirmed())); let mut confirmed_blocks = confirmed_bank .status_cache_ancestors() .into_iter() .filter(|&slot| slot <= end_slot && slot > last_element) .collect(); blocks.append(&mut confirmed_blocks); } Ok(blocks) } pub async fn get_blocks_with_limit( &self, start_slot: Slot, limit: usize, commitment: Option<CommitmentConfig>, ) -> Result<Vec<Slot>> { let commitment = commitment.unwrap_or_default(); check_is_at_least_confirmed(commitment)?; if limit > MAX_GET_CONFIRMED_BLOCKS_RANGE as usize { return Err(Error::invalid_params(format!( "Limit too large; max {}", MAX_GET_CONFIRMED_BLOCKS_RANGE ))); } let lowest_blockstore_slot = self.blockstore.lowest_slot(); if start_slot < lowest_blockstore_slot { // If the starting slot is lower than what's available in blockstore assume the entire // range can be fetched from BigTable. This range should not ever run into unfinalized // confirmed blocks due to MAX_GET_CONFIRMED_BLOCKS_RANGE if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { return Ok(bigtable_ledger_storage .get_confirmed_blocks(start_slot, limit) .await .unwrap_or_default()); } } let highest_confirmed_root = self .block_commitment_cache .read() .unwrap() .highest_confirmed_root(); // Finalized blocks let mut blocks: Vec<_> = self .blockstore .rooted_slot_iterator(max(start_slot, lowest_blockstore_slot)) .map_err(|_| Error::internal_error())? .take(limit) .filter(|&slot| slot <= highest_confirmed_root) .collect(); // Maybe add confirmed blocks if commitment.is_confirmed() && blocks.len() < limit { let last_element = blocks .last() .cloned() .unwrap_or_else(|| start_slot.saturating_sub(1)); let confirmed_bank = self.bank(Some(CommitmentConfig::confirmed())); let mut confirmed_blocks = confirmed_bank .status_cache_ancestors() .into_iter() .filter(|&slot| slot > last_element) .collect(); blocks.append(&mut confirmed_blocks); blocks.truncate(limit); } Ok(blocks) } pub async fn get_block_time(&self, slot: Slot) -> Result<Option<UnixTimestamp>> { if slot == 0 { return Ok(Some(self.genesis_creation_time())); } if slot <= self .block_commitment_cache .read() .unwrap() .highest_confirmed_root() { let result = self.blockstore.get_block_time(slot); self.check_blockstore_root(&result, slot)?; if result.is_err() || matches!(result, Ok(None)) { if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { let bigtable_result = bigtable_ledger_storage.get_confirmed_block(slot).await; self.check_bigtable_result(&bigtable_result)?; return Ok(bigtable_result .ok() .and_then(|confirmed_block| confirmed_block.block_time)); } } self.check_slot_cleaned_up(&result, slot)?; Ok(result.ok().unwrap_or(None)) } else { let r_bank_forks = self.bank_forks.read().unwrap(); if let Some(bank) = r_bank_forks.get(slot) { Ok(Some(bank.clock().unix_timestamp)) } else { Err(RpcCustomError::BlockNotAvailable { slot }.into()) } } } pub fn get_signature_confirmation_status( &self, signature: Signature, commitment: Option<CommitmentConfig>, ) -> Option<RpcSignatureConfirmation> { let bank = self.bank(commitment); let transaction_status = self.get_transaction_status(signature, &bank)?; let confirmations = transaction_status .confirmations .unwrap_or(MAX_LOCKOUT_HISTORY + 1); 
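// a `None` confirmation count above means the transaction is already rooted, so report
// a depth greater than any live fork's lockout (MAX_LOCKOUT_HISTORY + 1)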
Some(RpcSignatureConfirmation { confirmations, status: transaction_status.status, }) } pub fn get_signature_status( &self, signature: Signature, commitment: Option<CommitmentConfig>, ) -> Option<transaction::Result<()>> { let bank = self.bank(commitment); let (_, status) = bank.get_signature_status_slot(&signature)?; Some(status) } pub async fn get_signature_statuses( &self, signatures: Vec<Signature>, config: Option<RpcSignatureStatusConfig>, ) -> Result<RpcResponse<Vec<Option<TransactionStatus>>>> { let mut statuses: Vec<Option<TransactionStatus>> = vec![]; let search_transaction_history = config .map(|x| x.search_transaction_history) .unwrap_or(false); let bank = self.bank(Some(CommitmentConfig::processed())); if search_transaction_history && !self.config.enable_rpc_transaction_history { return Err(RpcCustomError::TransactionHistoryNotAvailable.into()); } for signature in signatures { let status = if let Some(status) = self.get_transaction_status(signature, &bank) { Some(status) } else if self.config.enable_rpc_transaction_history && search_transaction_history { if let Some(status) = self .blockstore .get_rooted_transaction_status(signature) .map_err(|_| Error::internal_error())? .filter(|(slot, _status_meta)| { slot <= &self .block_commitment_cache .read() .unwrap() .highest_confirmed_root() }) .map(|(slot, status_meta)| { let err = status_meta.status.clone().err(); TransactionStatus { slot, status: status_meta.status, confirmations: None, err, confirmation_status: Some(TransactionConfirmationStatus::Finalized), } }) { Some(status) } else if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { bigtable_ledger_storage .get_signature_status(&signature) .await .map(Some) .unwrap_or(None) } else { None } } else { None }; statuses.push(status); } Ok(new_response(&bank, statuses)) } fn get_transaction_status( &self, signature: Signature, bank: &Arc<Bank>, ) -> Option<TransactionStatus> { let (slot, status) = bank.get_signature_status_slot(&signature)?; let r_block_commitment_cache = self.block_commitment_cache.read().unwrap(); let optimistically_confirmed_bank = self.bank(Some(CommitmentConfig::confirmed())); let optimistically_confirmed = optimistically_confirmed_bank.get_signature_status_slot(&signature); let confirmations = if r_block_commitment_cache.root() >= slot && is_finalized(&r_block_commitment_cache, bank, &self.blockstore, slot) { None } else { r_block_commitment_cache .get_confirmation_count(slot) .or(Some(0)) }; let err = status.clone().err(); Some(TransactionStatus { slot, status, confirmations, err, confirmation_status: if confirmations.is_none() { Some(TransactionConfirmationStatus::Finalized) } else if optimistically_confirmed.is_some() { Some(TransactionConfirmationStatus::Confirmed) } else { Some(TransactionConfirmationStatus::Processed) }, }) } pub async fn get_transaction( &self, signature: Signature, config: Option<RpcEncodingConfigWrapper<RpcTransactionConfig>>, ) -> Result<Option<EncodedConfirmedTransaction>> { let config = config .map(|config| config.convert_to_current()) .unwrap_or_default(); let encoding = config.encoding.unwrap_or(UiTransactionEncoding::Json); let commitment = config.commitment.unwrap_or_default(); check_is_at_least_confirmed(commitment)?; if self.config.enable_rpc_transaction_history { let confirmed_bank = self.bank(Some(CommitmentConfig::confirmed())); let transaction = if commitment.is_confirmed() { let highest_confirmed_slot = confirmed_bank.slot(); self.blockstore .get_complete_transaction(signature, highest_confirmed_slot) } 
else { self.blockstore.get_rooted_transaction(signature) }; match transaction.unwrap_or(None) { Some(mut confirmed_transaction) => { if commitment.is_confirmed() && confirmed_bank // should be redundant .status_cache_ancestors() .contains(&confirmed_transaction.slot) { if confirmed_transaction.block_time.is_none() { let r_bank_forks = self.bank_forks.read().unwrap(); confirmed_transaction.block_time = r_bank_forks .get(confirmed_transaction.slot) .map(|bank| bank.clock().unix_timestamp); } return Ok(Some(confirmed_transaction.encode(encoding))); } if confirmed_transaction.slot <= self .block_commitment_cache .read() .unwrap() .highest_confirmed_root() { return Ok(Some(confirmed_transaction.encode(encoding))); } } None => { if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { return Ok(bigtable_ledger_storage .get_confirmed_transaction(&signature) .await .unwrap_or(None) .map(|confirmed| confirmed.encode(encoding))); } } } } else { return Err(RpcCustomError::TransactionHistoryNotAvailable.into()); } Ok(None) } pub fn get_confirmed_signatures_for_address( &self, pubkey: Pubkey, start_slot: Slot, end_slot: Slot, ) -> Vec<Signature> { if self.config.enable_rpc_transaction_history { // TODO: Add bigtable_ledger_storage support as a part of // https://github.com/solana-labs/solana/pull/10928 let end_slot = min( end_slot, self.block_commitment_cache .read() .unwrap() .highest_confirmed_root(), ); self.blockstore .get_confirmed_signatures_for_address(pubkey, start_slot, end_slot) .unwrap_or_default() } else { vec![] } } pub async fn get_signatures_for_address( &self, address: Pubkey, mut before: Option<Signature>, until: Option<Signature>, mut limit: usize, commitment: Option<CommitmentConfig>, ) -> Result<Vec<RpcConfirmedTransactionStatusWithSignature>> { let commitment = commitment.unwrap_or_default(); check_is_at_least_confirmed(commitment)?; if self.config.enable_rpc_transaction_history { let highest_confirmed_root = self .block_commitment_cache .read() .unwrap() .highest_confirmed_root(); let highest_slot = if commitment.is_confirmed() { let confirmed_bank = self.bank(Some(CommitmentConfig::confirmed())); confirmed_bank.slot() } else { highest_confirmed_root }; let mut results = self .blockstore .get_confirmed_signatures_for_address2(address, highest_slot, before, until, limit) .map_err(|err| Error::invalid_params(format!("{}", err)))?; if results.len() < limit { if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { if !results.is_empty() { limit -= results.len(); before = results.last().map(|x| x.signature); } let bigtable_results = bigtable_ledger_storage .get_confirmed_signatures_for_address( &address, before.as_ref(), until.as_ref(), limit, ) .await; match bigtable_results { Ok(bigtable_results) => { results.extend(bigtable_results.into_iter().map(|x| x.0)); } Err(err) => { warn!("{:?}", err); } } } } Ok(results .into_iter() .map(|x| { let mut item: RpcConfirmedTransactionStatusWithSignature = x.into(); if item.slot <= highest_confirmed_root { item.confirmation_status = Some(TransactionConfirmationStatus::Finalized); } else { item.confirmation_status = Some(TransactionConfirmationStatus::Confirmed); if item.block_time.is_none() { let r_bank_forks = self.bank_forks.read().unwrap(); item.block_time = r_bank_forks .get(item.slot) .map(|bank| bank.clock().unix_timestamp); } } item }) .collect()) } else { Err(RpcCustomError::TransactionHistoryNotAvailable.into()) } } pub async fn get_first_available_block(&self) -> Slot { let slot = self .blockstore 
.get_first_available_block() .unwrap_or_default(); if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { let bigtable_slot = bigtable_ledger_storage .get_first_available_block() .await .unwrap_or(None) .unwrap_or(slot); if bigtable_slot < slot { return bigtable_slot; } } slot } pub fn get_stake_activation( &self, pubkey: &Pubkey, config: Option<RpcEpochConfig>, ) -> Result<RpcStakeActivation> { let config = config.unwrap_or_default(); let bank = self.bank(config.commitment); let epoch = config.epoch.unwrap_or_else(|| bank.epoch()); if bank.epoch().saturating_sub(epoch) > solana_sdk::stake_history::MAX_ENTRIES as u64 { return Err(Error::invalid_params(format!( "Invalid param: epoch {:?} is too far in the past", epoch ))); } if epoch > bank.epoch() { return Err(Error::invalid_params(format!( "Invalid param: epoch {:?} has not yet started", epoch ))); } let stake_account = bank .get_account(pubkey) .ok_or_else(|| Error::invalid_params("Invalid param: account not found".to_string()))?; let stake_state: StakeState = stake_account .state() .map_err(|_| Error::invalid_params("Invalid param: not a stake account".to_string()))?; let delegation = stake_state.delegation(); if delegation.is_none() { match stake_state.meta() { None => { return Err(Error::invalid_params( "Invalid param: stake account not initialized".to_string(), )); } Some(meta) => { let rent_exempt_reserve = meta.rent_exempt_reserve; return Ok(RpcStakeActivation { state: StakeActivationState::Inactive, active: 0, inactive: stake_account.lamports().saturating_sub(rent_exempt_reserve), }); } } } let delegation = delegation.unwrap(); let stake_history_account = bank .get_account(&stake_history::id()) .ok_or_else(Error::internal_error)?; let stake_history = solana_sdk::account::from_account::<StakeHistory, _>(&stake_history_account) .ok_or_else(Error::internal_error)?; let StakeActivationStatus { effective, activating, deactivating, } = delegation.stake_activating_and_deactivating(epoch, Some(&stake_history)); let stake_activation_state = if deactivating > 0 { StakeActivationState::Deactivating } else if activating > 0 { StakeActivationState::Activating } else if effective > 0 { StakeActivationState::Active } else { StakeActivationState::Inactive }; let inactive_stake = match stake_activation_state { StakeActivationState::Activating => activating, StakeActivationState::Active => 0, StakeActivationState::Deactivating => delegation.stake.saturating_sub(effective), StakeActivationState::Inactive => delegation.stake, }; Ok(RpcStakeActivation { state: stake_activation_state, active: effective, inactive: inactive_stake, }) } pub fn get_token_account_balance( &self, pubkey: &Pubkey, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<UiTokenAmount>> { let bank = self.bank(commitment); let account = bank.get_account(pubkey).ok_or_else(|| { Error::invalid_params("Invalid param: could not find account".to_string()) })?; if account.owner() != &spl_token_id() { return Err(Error::invalid_params( "Invalid param: not a Token account".to_string(), )); } let token_account = TokenAccount::unpack(account.data()) .map_err(|_| Error::invalid_params("Invalid param: not a Token account".to_string()))?; let mint = &Pubkey::from_str(&token_account.mint.to_string()) .expect("Token account mint should be convertible to Pubkey"); let (_, decimals) = get_mint_owner_and_decimals(&bank, mint)?; let balance = token_amount_to_ui_amount(token_account.amount, decimals); Ok(new_response(&bank, balance)) } pub fn get_token_supply( &self, mint: 
&Pubkey, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<UiTokenAmount>> { let bank = self.bank(commitment); let mint_account = bank.get_account(mint).ok_or_else(|| { Error::invalid_params("Invalid param: could not find account".to_string()) })?; if mint_account.owner() != &spl_token_id() { return Err(Error::invalid_params( "Invalid param: not a Token mint".to_string(), )); } let mint = Mint::unpack(mint_account.data()).map_err(|_| { Error::invalid_params("Invalid param: mint could not be unpacked".to_string()) })?; let supply = token_amount_to_ui_amount(mint.supply, mint.decimals); Ok(new_response(&bank, supply)) } pub fn get_token_largest_accounts( &self, mint: &Pubkey, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<Vec<RpcTokenAccountBalance>>> { let bank = self.bank(commitment); let (mint_owner, decimals) = get_mint_owner_and_decimals(&bank, mint)?; if mint_owner != spl_token_id() { return Err(Error::invalid_params( "Invalid param: not a Token mint".to_string(), )); } let mut token_balances: Vec<RpcTokenAccountBalance> = self .get_filtered_spl_token_accounts_by_mint(&bank, mint, vec![])? .into_iter() .map(|(address, account)| { let amount = TokenAccount::unpack(account.data()) .map(|account| account.amount) .unwrap_or(0); let amount = token_amount_to_ui_amount(amount, decimals); RpcTokenAccountBalance { address: address.to_string(), amount, } }) .collect(); token_balances.sort_by(|a, b| { a.amount .amount .parse::<u64>() .unwrap() .cmp(&b.amount.amount.parse::<u64>().unwrap()) .reverse() }); token_balances.truncate(NUM_LARGEST_ACCOUNTS); Ok(new_response(&bank, token_balances)) } pub fn get_token_accounts_by_owner( &self, owner: &Pubkey, token_account_filter: TokenAccountsFilter, config: Option<RpcAccountInfoConfig>, ) -> Result<RpcResponse<Vec<RpcKeyedAccount>>> { let config = config.unwrap_or_default(); let bank = self.bank(config.commitment); let encoding = config.encoding.unwrap_or(UiAccountEncoding::Binary); let data_slice_config = config.data_slice; check_slice_and_encoding(&encoding, data_slice_config.is_some())?; let (_, mint) = get_token_program_id_and_mint(&bank, token_account_filter)?; let mut filters = vec![]; if let Some(mint) = mint { // Optional filter on Mint address filters.push(RpcFilterType::Memcmp(Memcmp { offset: 0, bytes: MemcmpEncodedBytes::Bytes(mint.to_bytes().into()), encoding: None, })); } let keyed_accounts = self.get_filtered_spl_token_accounts_by_owner(&bank, owner, filters)?; let accounts = if encoding == UiAccountEncoding::JsonParsed { get_parsed_token_accounts(bank.clone(), keyed_accounts.into_iter()).collect() } else { keyed_accounts .into_iter() .map(|(pubkey, account)| RpcKeyedAccount { pubkey: pubkey.to_string(), account: UiAccount::encode( &pubkey, &account, encoding, None, data_slice_config, ), }) .collect() }; Ok(new_response(&bank, accounts)) } pub fn get_token_accounts_by_delegate( &self, delegate: &Pubkey, token_account_filter: TokenAccountsFilter, config: Option<RpcAccountInfoConfig>, ) -> Result<RpcResponse<Vec<RpcKeyedAccount>>> { let config = config.unwrap_or_default(); let bank = self.bank(config.commitment); let encoding = config.encoding.unwrap_or(UiAccountEncoding::Binary); let data_slice_config = config.data_slice; check_slice_and_encoding(&encoding, data_slice_config.is_some())?; let (token_program_id, mint) = get_token_program_id_and_mint(&bank, token_account_filter)?; let mut filters = vec![ // Filter on Delegate is_some() RpcFilterType::Memcmp(Memcmp { offset: 72, bytes: 
MemcmpEncodedBytes::Bytes(bincode::serialize(&1u32).unwrap()), encoding: None, }), // Filter on Delegate address RpcFilterType::Memcmp(Memcmp { offset: 76, bytes: MemcmpEncodedBytes::Bytes(delegate.to_bytes().into()), encoding: None, }), ]; // Optional filter on Mint address, uses mint account index for scan let keyed_accounts = if let Some(mint) = mint { self.get_filtered_spl_token_accounts_by_mint(&bank, &mint, filters)? } else { // Filter on Token Account state filters.push(RpcFilterType::DataSize( TokenAccount::get_packed_len() as u64 )); self.get_filtered_program_accounts(&bank, &token_program_id, filters)? }; let accounts = if encoding == UiAccountEncoding::JsonParsed { get_parsed_token_accounts(bank.clone(), keyed_accounts.into_iter()).collect() } else { keyed_accounts .into_iter() .map(|(pubkey, account)| RpcKeyedAccount { pubkey: pubkey.to_string(), account: UiAccount::encode( &pubkey, &account, encoding, None, data_slice_config, ), }) .collect() }; Ok(new_response(&bank, accounts)) } /// Use a set of filters to get an iterator of keyed program accounts from a bank fn get_filtered_program_accounts( &self, bank: &Arc<Bank>, program_id: &Pubkey, mut filters: Vec<RpcFilterType>, ) -> RpcCustomResult<Vec<(Pubkey, AccountSharedData)>> { optimize_filters(&mut filters); let filter_closure = |account: &AccountSharedData| { filters.iter().all(|filter_type| match filter_type { RpcFilterType::DataSize(size) => account.data().len() as u64 == *size, RpcFilterType::Memcmp(compare) => compare.bytes_match(account.data()), }) }; if self .config .account_indexes .contains(&AccountIndex::ProgramId) { if !self.config.account_indexes.include_key(program_id) { return Err(RpcCustomError::KeyExcludedFromSecondaryIndex { index_key: program_id.to_string(), }); } Ok(bank .get_filtered_indexed_accounts( &IndexKey::ProgramId(*program_id), |account| { // The program-id account index checks for Account owner on inclusion. However, due // to the current AccountsDb implementation, an account may remain in storage as a // zero-lamport AccountSharedData::Default() after being wiped and reinitialized in later // updates. We include the redundant filters here to avoid returning these // accounts. account.owner() == program_id && filter_closure(account) }, &ScanConfig::default(), bank.byte_limit_for_scans(), ) .map_err(|e| RpcCustomError::ScanError { message: e.to_string(), })?) } else { // this path does not need to provide a mb limit because we only want to support secondary indexes Ok(bank .get_filtered_program_accounts(program_id, filter_closure, &ScanConfig::default()) .map_err(|e| RpcCustomError::ScanError { message: e.to_string(), })?) } } /// Get an iterator of spl-token accounts by owner address fn get_filtered_spl_token_accounts_by_owner( &self, bank: &Arc<Bank>, owner_key: &Pubkey, mut filters: Vec<RpcFilterType>, ) -> RpcCustomResult<Vec<(Pubkey, AccountSharedData)>> { // The by-owner accounts index checks for Token Account state and Owner address on // inclusion. However, due to the current AccountsDb implementation, an account may remain // in storage as a zero-lamport AccountSharedData::Default() after being wiped and reinitialized in // later updates. We include the redundant filters here to avoid returning these accounts. 
        //
        // Filter on Token Account state
        filters.push(RpcFilterType::DataSize(
            TokenAccount::get_packed_len() as u64
        ));
        // Filter on Owner address
        filters.push(RpcFilterType::Memcmp(Memcmp {
            offset: SPL_TOKEN_ACCOUNT_OWNER_OFFSET,
            bytes: MemcmpEncodedBytes::Bytes(owner_key.to_bytes().into()),
            encoding: None,
        }));
        if self
            .config
            .account_indexes
            .contains(&AccountIndex::SplTokenOwner)
        {
            if !self.config.account_indexes.include_key(owner_key) {
                return Err(RpcCustomError::KeyExcludedFromSecondaryIndex {
                    index_key: owner_key.to_string(),
                });
            }
            Ok(bank
                .get_filtered_indexed_accounts(
                    &IndexKey::SplTokenOwner(*owner_key),
                    |account| {
                        account.owner() == &spl_token_id()
                            && filters.iter().all(|filter_type| match filter_type {
                                RpcFilterType::DataSize(size) => {
                                    account.data().len() as u64 == *size
                                }
                                RpcFilterType::Memcmp(compare) => {
                                    compare.bytes_match(account.data())
                                }
                            })
                    },
                    &ScanConfig::default(),
                    bank.byte_limit_for_scans(),
                )
                .map_err(|e| RpcCustomError::ScanError {
                    message: e.to_string(),
                })?)
        } else {
            self.get_filtered_program_accounts(bank, &spl_token_id(), filters)
        }
    }

    /// Get an iterator of spl-token accounts by mint address
    fn get_filtered_spl_token_accounts_by_mint(
        &self,
        bank: &Arc<Bank>,
        mint_key: &Pubkey,
        mut filters: Vec<RpcFilterType>,
    ) -> RpcCustomResult<Vec<(Pubkey, AccountSharedData)>> {
        // The by-mint accounts index checks for Token Account state and Mint address on inclusion.
        // However, due to the current AccountsDb implementation, an account may remain in storage
        // as a zero-lamport AccountSharedData::Default() after being wiped and reinitialized in later
        // updates. We include the redundant filters here to avoid returning these accounts.
        //
        // Filter on Token Account state
        filters.push(RpcFilterType::DataSize(
            TokenAccount::get_packed_len() as u64
        ));
        // Filter on Mint address
        filters.push(RpcFilterType::Memcmp(Memcmp {
            offset: SPL_TOKEN_ACCOUNT_MINT_OFFSET,
            bytes: MemcmpEncodedBytes::Bytes(mint_key.to_bytes().into()),
            encoding: None,
        }));
        if self
            .config
            .account_indexes
            .contains(&AccountIndex::SplTokenMint)
        {
            if !self.config.account_indexes.include_key(mint_key) {
                return Err(RpcCustomError::KeyExcludedFromSecondaryIndex {
                    index_key: mint_key.to_string(),
                });
            }
            Ok(bank
                .get_filtered_indexed_accounts(
                    &IndexKey::SplTokenMint(*mint_key),
                    |account| {
                        account.owner() == &spl_token_id()
                            && filters.iter().all(|filter_type| match filter_type {
                                RpcFilterType::DataSize(size) => {
                                    account.data().len() as u64 == *size
                                }
                                RpcFilterType::Memcmp(compare) => {
                                    compare.bytes_match(account.data())
                                }
                            })
                    },
                    &ScanConfig::default(),
                    bank.byte_limit_for_scans(),
                )
                .map_err(|e| RpcCustomError::ScanError {
                    message: e.to_string(),
                })?)
} else { self.get_filtered_program_accounts(bank, &spl_token_id(), filters) } } fn get_latest_blockhash( &self, commitment: Option<CommitmentConfig>, ) -> RpcResponse<RpcBlockhash> { let bank = self.bank(commitment); let blockhash = bank.last_blockhash(); let last_valid_block_height = bank .get_blockhash_last_valid_block_height(&blockhash) .expect("bank blockhash queue should contain blockhash"); new_response( &bank, RpcBlockhash { blockhash: blockhash.to_string(), last_valid_block_height, }, ) } fn is_blockhash_valid( &self, blockhash: &Hash, commitment: Option<CommitmentConfig>, ) -> RpcResponse<bool> { let bank = self.bank(commitment); let is_valid = bank.is_blockhash_valid(blockhash); new_response(&bank, is_valid) } fn get_fee_for_message( &self, message: &SanitizedMessage, commitment: Option<CommitmentConfig>, ) -> RpcResponse<Option<u64>> { let bank = self.bank(commitment); let fee = bank.get_fee_for_message(message); new_response(&bank, fee) } } fn optimize_filters(filters: &mut Vec<RpcFilterType>) { filters.iter_mut().for_each(|filter_type| { if let RpcFilterType::Memcmp(compare) = filter_type { use MemcmpEncodedBytes::*; match &compare.bytes { #[allow(deprecated)] Binary(bytes) | Base58(bytes) => { compare.bytes = Bytes(bs58::decode(bytes).into_vec().unwrap()); } Base64(bytes) => { compare.bytes = Bytes(base64::decode(bytes).unwrap()); } _ => {} } } }) } fn verify_transaction( transaction: &SanitizedTransaction, feature_set: &Arc<feature_set::FeatureSet>, ) -> Result<()> { #[allow(clippy::question_mark)] if transaction.verify().is_err() { return Err(RpcCustomError::TransactionSignatureVerificationFailure.into()); } if let Err(e) = transaction.verify_precompiles(feature_set) { return Err(RpcCustomError::TransactionPrecompileVerificationFailure(e).into()); } if !transaction.verify_signatures_len() { return Err(RpcCustomError::TransactionSignatureVerificationFailure.into()); } Ok(()) } fn verify_filter(input: &RpcFilterType) -> Result<()> { input .verify() .map_err(|e| Error::invalid_params(format!("Invalid param: {:?}", e))) } fn verify_pubkey(input: &str) -> Result<Pubkey> { input .parse() .map_err(|e| Error::invalid_params(format!("Invalid param: {:?}", e))) } fn verify_hash(input: &str) -> Result<Hash> { input .parse() .map_err(|e| Error::invalid_params(format!("Invalid param: {:?}", e))) } fn verify_signature(input: &str) -> Result<Signature> { input .parse() .map_err(|e| Error::invalid_params(format!("Invalid param: {:?}", e))) } fn verify_token_account_filter( token_account_filter: RpcTokenAccountsFilter, ) -> Result<TokenAccountsFilter> { match token_account_filter { RpcTokenAccountsFilter::Mint(mint_str) => { let mint = verify_pubkey(&mint_str)?; Ok(TokenAccountsFilter::Mint(mint)) } RpcTokenAccountsFilter::ProgramId(program_id_str) => { let program_id = verify_pubkey(&program_id_str)?; Ok(TokenAccountsFilter::ProgramId(program_id)) } } } fn verify_and_parse_signatures_for_address_params( address: String, before: Option<String>, until: Option<String>, limit: Option<usize>, ) -> Result<(Pubkey, Option<Signature>, Option<Signature>, usize)> { let address = verify_pubkey(&address)?; let before = before .map(|ref before| verify_signature(before)) .transpose()?; let until = until.map(|ref until| verify_signature(until)).transpose()?; let limit = limit.unwrap_or(MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT); if limit == 0 || limit > MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT { return Err(Error::invalid_params(format!( "Invalid limit; max {}", 
MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT ))); } Ok((address, before, until, limit)) } fn check_is_at_least_confirmed(commitment: CommitmentConfig) -> Result<()> { if !commitment.is_at_least_confirmed() { return Err(Error::invalid_params( "Method does not support commitment below `confirmed`", )); } Ok(()) } fn check_slice_and_encoding(encoding: &UiAccountEncoding, data_slice_is_some: bool) -> Result<()> { match encoding { UiAccountEncoding::JsonParsed => { if data_slice_is_some { let message = "Sliced account data can only be encoded using binary (base 58) or base64 encoding." .to_string(); Err(error::Error { code: error::ErrorCode::InvalidRequest, message, data: None, }) } else { Ok(()) } } UiAccountEncoding::Binary | UiAccountEncoding::Base58 | UiAccountEncoding::Base64 | UiAccountEncoding::Base64Zstd => Ok(()), } } fn get_encoded_account( bank: &Arc<Bank>, pubkey: &Pubkey, encoding: UiAccountEncoding, data_slice: Option<UiDataSliceConfig>, ) -> Result<Option<UiAccount>> { match bank.get_account(pubkey) { Some(account) => { let response = if account.owner() == &spl_token_id() && encoding == UiAccountEncoding::JsonParsed { get_parsed_token_account(bank.clone(), pubkey, account) } else { encode_account(&account, pubkey, encoding, data_slice)? }; Ok(Some(response)) } None => Ok(None), } } fn encode_account<T: ReadableAccount>( account: &T, pubkey: &Pubkey, encoding: UiAccountEncoding, data_slice: Option<UiDataSliceConfig>, ) -> Result<UiAccount> { if (encoding == UiAccountEncoding::Binary || encoding == UiAccountEncoding::Base58) && account.data().len() > MAX_BASE58_BYTES { let message = format!("Encoded binary (base 58) data should be less than {} bytes, please use Base64 encoding.", MAX_BASE58_BYTES); Err(error::Error { code: error::ErrorCode::InvalidRequest, message, data: None, }) } else { Ok(UiAccount::encode( pubkey, account, encoding, None, data_slice, )) } } /// Analyze custom filters to determine if the result will be a subset of spl-token accounts by /// owner. /// NOTE: `optimize_filters()` should almost always be called before using this method because of /// the strict match on `MemcmpEncodedBytes::Bytes`. fn get_spl_token_owner_filter(program_id: &Pubkey, filters: &[RpcFilterType]) -> Option<Pubkey> { if program_id != &spl_token_id() { return None; } let mut data_size_filter: Option<u64> = None; let mut owner_key: Option<Pubkey> = None; let mut incorrect_owner_len: Option<usize> = None; for filter in filters { match filter { RpcFilterType::DataSize(size) => data_size_filter = Some(*size), RpcFilterType::Memcmp(Memcmp { offset: SPL_TOKEN_ACCOUNT_OWNER_OFFSET, bytes: MemcmpEncodedBytes::Bytes(bytes), .. }) => { if bytes.len() == PUBKEY_BYTES { owner_key = Some(Pubkey::new(bytes)); } else { incorrect_owner_len = Some(bytes.len()); } } _ => {} } } if data_size_filter == Some(TokenAccount::get_packed_len() as u64) { if let Some(incorrect_owner_len) = incorrect_owner_len { info!( "Incorrect num bytes ({:?}) provided for spl_token_owner_filter", incorrect_owner_len ); } owner_key } else { debug!("spl_token program filters do not match by-owner index requisites"); None } } /// Analyze custom filters to determine if the result will be a subset of spl-token accounts by /// mint. /// NOTE: `optimize_filters()` should almost always be called before using this method because of /// the strict match on `MemcmpEncodedBytes::Bytes`. 
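///
/// Illustrative example (hypothetical `mint_pubkey`): when `program_id` is the
/// spl-token id, a filter set such as
///
///     vec![
///         RpcFilterType::DataSize(TokenAccount::get_packed_len() as u64),
///         RpcFilterType::Memcmp(Memcmp {
///             offset: SPL_TOKEN_ACCOUNT_MINT_OFFSET,
///             bytes: MemcmpEncodedBytes::Bytes(mint_pubkey.to_bytes().to_vec()),
///             encoding: None,
///         }),
///     ]
///
/// should yield `Some(mint_pubkey)`; any other data size, memcmp offset, or
/// byte length yields `None`.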
fn get_spl_token_mint_filter(program_id: &Pubkey, filters: &[RpcFilterType]) -> Option<Pubkey> { if program_id != &spl_token_id() { return None; } let mut data_size_filter: Option<u64> = None; let mut mint: Option<Pubkey> = None; let mut incorrect_mint_len: Option<usize> = None; for filter in filters { match filter { RpcFilterType::DataSize(size) => data_size_filter = Some(*size), RpcFilterType::Memcmp(Memcmp { offset: SPL_TOKEN_ACCOUNT_MINT_OFFSET, bytes: MemcmpEncodedBytes::Bytes(bytes), .. }) => { if bytes.len() == PUBKEY_BYTES { mint = Some(Pubkey::new(bytes)); } else { incorrect_mint_len = Some(bytes.len()); } } _ => {} } } if data_size_filter == Some(TokenAccount::get_packed_len() as u64) { if let Some(incorrect_mint_len) = incorrect_mint_len { info!( "Incorrect num bytes ({:?}) provided for spl_token_mint_filter", incorrect_mint_len ); } mint } else { debug!("spl_token program filters do not match by-mint index requisites"); None } } /// Analyze a passed Pubkey that may be a Token program id or Mint address to determine the program /// id and optional Mint fn get_token_program_id_and_mint( bank: &Arc<Bank>, token_account_filter: TokenAccountsFilter, ) -> Result<(Pubkey, Option<Pubkey>)> { match token_account_filter { TokenAccountsFilter::Mint(mint) => { let (mint_owner, _) = get_mint_owner_and_decimals(bank, &mint)?; if mint_owner != spl_token_id() { return Err(Error::invalid_params( "Invalid param: not a Token mint".to_string(), )); } Ok((mint_owner, Some(mint))) } TokenAccountsFilter::ProgramId(program_id) => { if program_id == spl_token_id() { Ok((program_id, None)) } else { Err(Error::invalid_params( "Invalid param: unrecognized Token program id".to_string(), )) } } } } fn _send_transaction( meta: JsonRpcRequestProcessor, signature: Signature, wire_transaction: Vec<u8>, last_valid_block_height: u64, durable_nonce_info: Option<(Pubkey, Hash)>, max_retries: Option<usize>, ) -> Result<String> { let transaction_info = TransactionInfo::new( signature, wire_transaction, last_valid_block_height, durable_nonce_info, max_retries, ); meta.transaction_sender .lock() .unwrap() .send(transaction_info) .unwrap_or_else(|err| warn!("Failed to enqueue transaction: {}", err)); Ok(signature.to_string()) } // Minimal RPC interface that known validators are expected to provide pub mod rpc_minimal { use super::*; #[rpc] pub trait Minimal { type Metadata; #[rpc(meta, name = "getBalance")] fn get_balance( &self, meta: Self::Metadata, pubkey_str: String, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<u64>>; #[rpc(meta, name = "getEpochInfo")] fn get_epoch_info( &self, meta: Self::Metadata, commitment: Option<CommitmentConfig>, ) -> Result<EpochInfo>; #[rpc(meta, name = "getHealth")] fn get_health(&self, meta: Self::Metadata) -> Result<String>; #[rpc(meta, name = "getIdentity")] fn get_identity(&self, meta: Self::Metadata) -> Result<RpcIdentity>; #[rpc(meta, name = "getSlot")] fn get_slot( &self, meta: Self::Metadata, commitment: Option<CommitmentConfig>, ) -> Result<Slot>; #[rpc(meta, name = "getBlockHeight")] fn get_block_height( &self, meta: Self::Metadata, commitment: Option<CommitmentConfig>, ) -> Result<u64>; #[rpc(meta, name = "getHighestSnapshotSlot")] fn get_highest_snapshot_slot(&self, meta: Self::Metadata) -> Result<RpcSnapshotSlotInfo>; #[rpc(meta, name = "getTransactionCount")] fn get_transaction_count( &self, meta: Self::Metadata, commitment: Option<CommitmentConfig>, ) -> Result<u64>; #[rpc(meta, name = "getVersion")] fn get_version(&self, meta: Self::Metadata) -> 
Result<RpcVersionInfo>; // TODO: Refactor `solana-validator wait-for-restart-window` to not require this method, so // it can be removed from rpc_minimal #[rpc(meta, name = "getVoteAccounts")] fn get_vote_accounts( &self, meta: Self::Metadata, config: Option<RpcGetVoteAccountsConfig>, ) -> Result<RpcVoteAccountStatus>; // TODO: Refactor `solana-validator wait-for-restart-window` to not require this method, so // it can be removed from rpc_minimal #[rpc(meta, name = "getLeaderSchedule")] fn get_leader_schedule( &self, meta: Self::Metadata, options: Option<RpcLeaderScheduleConfigWrapper>, config: Option<RpcLeaderScheduleConfig>, ) -> Result<Option<RpcLeaderSchedule>>; } pub struct MinimalImpl; impl Minimal for MinimalImpl { type Metadata = JsonRpcRequestProcessor; fn get_balance( &self, meta: Self::Metadata, pubkey_str: String, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<u64>> { debug!("get_balance rpc request received: {:?}", pubkey_str); let pubkey = verify_pubkey(&pubkey_str)?; Ok(meta.get_balance(&pubkey, commitment)) } fn get_epoch_info( &self, meta: Self::Metadata, commitment: Option<CommitmentConfig>, ) -> Result<EpochInfo> { debug!("get_epoch_info rpc request received"); let bank = meta.bank(commitment); Ok(bank.get_epoch_info()) } fn get_health(&self, meta: Self::Metadata) -> Result<String> { match meta.health.check() { RpcHealthStatus::Ok => Ok("ok".to_string()), RpcHealthStatus::Unknown => Err(RpcCustomError::NodeUnhealthy { num_slots_behind: None, } .into()), RpcHealthStatus::Behind { num_slots } => Err(RpcCustomError::NodeUnhealthy { num_slots_behind: Some(num_slots), } .into()), } } fn get_identity(&self, meta: Self::Metadata) -> Result<RpcIdentity> { debug!("get_identity rpc request received"); Ok(RpcIdentity { identity: meta.cluster_info.id().to_string(), }) } fn get_slot( &self, meta: Self::Metadata, commitment: Option<CommitmentConfig>, ) -> Result<Slot> { debug!("get_slot rpc request received"); Ok(meta.get_slot(commitment)) } fn get_block_height( &self, meta: Self::Metadata, commitment: Option<CommitmentConfig>, ) -> Result<u64> { debug!("get_block_height rpc request received"); Ok(meta.get_block_height(commitment)) } fn get_highest_snapshot_slot(&self, meta: Self::Metadata) -> Result<RpcSnapshotSlotInfo> { debug!("get_highest_snapshot_slot rpc request received"); if meta.snapshot_config.is_none() { return Err(RpcCustomError::NoSnapshot.into()); } let snapshot_archives_dir = meta .snapshot_config .map(|snapshot_config| snapshot_config.snapshot_archives_dir) .unwrap(); let full_snapshot_slot = snapshot_utils::get_highest_full_snapshot_archive_slot(&snapshot_archives_dir) .ok_or(RpcCustomError::NoSnapshot)?; let incremental_snapshot_slot = snapshot_utils::get_highest_incremental_snapshot_archive_slot( &snapshot_archives_dir, full_snapshot_slot, ); Ok(RpcSnapshotSlotInfo { full: full_snapshot_slot, incremental: incremental_snapshot_slot, }) } fn get_transaction_count( &self, meta: Self::Metadata, commitment: Option<CommitmentConfig>, ) -> Result<u64> { debug!("get_transaction_count rpc request received"); Ok(meta.get_transaction_count(commitment)) } fn get_version(&self, _: Self::Metadata) -> Result<RpcVersionInfo> { debug!("get_version rpc request received"); let version = solana_version::Version::default(); Ok(RpcVersionInfo { solana_core: version.to_string(), feature_set: Some(version.feature_set), }) } // TODO: Refactor `solana-validator wait-for-restart-window` to not require this method, so // it can be removed from rpc_minimal fn get_vote_accounts( 
&self, meta: Self::Metadata, config: Option<RpcGetVoteAccountsConfig>, ) -> Result<RpcVoteAccountStatus> { debug!("get_vote_accounts rpc request received"); meta.get_vote_accounts(config) } // TODO: Refactor `solana-validator wait-for-restart-window` to not require this method, so // it can be removed from rpc_minimal fn get_leader_schedule( &self, meta: Self::Metadata, options: Option<RpcLeaderScheduleConfigWrapper>, config: Option<RpcLeaderScheduleConfig>, ) -> Result<Option<RpcLeaderSchedule>> { let (slot, maybe_config) = options.map(|options| options.unzip()).unwrap_or_default(); let config = maybe_config.or(config).unwrap_or_default(); if let Some(ref identity) = config.identity { let _ = verify_pubkey(identity)?; } let bank = meta.bank(config.commitment); let slot = slot.unwrap_or_else(|| bank.slot()); let epoch = bank.epoch_schedule().get_epoch(slot); debug!("get_leader_schedule rpc request received: {:?}", slot); Ok(meta .leader_schedule_cache .get_epoch_leader_schedule(epoch) .map(|leader_schedule| { let mut schedule_by_identity = solana_ledger::leader_schedule_utils::leader_schedule_by_identity( leader_schedule.get_slot_leaders().iter().enumerate(), ); if let Some(identity) = config.identity { schedule_by_identity.retain(|k, _| *k == identity); } schedule_by_identity })) } } } // RPC interface that only depends on immediate Bank data // Expected to be provided by both API nodes and (future) accounts replica nodes pub mod rpc_bank { use super::*; #[rpc] pub trait BankData { type Metadata; #[rpc(meta, name = "getMinimumBalanceForRentExemption")] fn get_minimum_balance_for_rent_exemption( &self, meta: Self::Metadata, data_len: usize, commitment: Option<CommitmentConfig>, ) -> Result<u64>; #[rpc(meta, name = "getInflationGovernor")] fn get_inflation_governor( &self, meta: Self::Metadata, commitment: Option<CommitmentConfig>, ) -> Result<RpcInflationGovernor>; #[rpc(meta, name = "getInflationRate")] fn get_inflation_rate(&self, meta: Self::Metadata) -> Result<RpcInflationRate>; #[rpc(meta, name = "getEpochSchedule")] fn get_epoch_schedule(&self, meta: Self::Metadata) -> Result<EpochSchedule>; #[rpc(meta, name = "getSlotLeader")] fn get_slot_leader( &self, meta: Self::Metadata, commitment: Option<CommitmentConfig>, ) -> Result<String>; #[rpc(meta, name = "getSlotLeaders")] fn get_slot_leaders( &self, meta: Self::Metadata, start_slot: Slot, limit: u64, ) -> Result<Vec<String>>; #[rpc(meta, name = "getBlockProduction")] fn get_block_production( &self, meta: Self::Metadata, config: Option<RpcBlockProductionConfig>, ) -> Result<RpcResponse<RpcBlockProduction>>; } pub struct BankDataImpl; impl BankData for BankDataImpl { type Metadata = JsonRpcRequestProcessor; fn get_minimum_balance_for_rent_exemption( &self, meta: Self::Metadata, data_len: usize, commitment: Option<CommitmentConfig>, ) -> Result<u64> { debug!( "get_minimum_balance_for_rent_exemption rpc request received: {:?}", data_len ); if data_len as u64 > system_instruction::MAX_PERMITTED_DATA_LENGTH { return Err(Error::invalid_request()); } Ok(meta.get_minimum_balance_for_rent_exemption(data_len, commitment)) } fn get_inflation_governor( &self, meta: Self::Metadata, commitment: Option<CommitmentConfig>, ) -> Result<RpcInflationGovernor> { debug!("get_inflation_governor rpc request received"); Ok(meta.get_inflation_governor(commitment)) } fn get_inflation_rate(&self, meta: Self::Metadata) -> Result<RpcInflationRate> { debug!("get_inflation_rate rpc request received"); Ok(meta.get_inflation_rate()) } fn get_epoch_schedule(&self, meta: 
Self::Metadata) -> Result<EpochSchedule> { debug!("get_epoch_schedule rpc request received"); Ok(meta.get_epoch_schedule()) } fn get_slot_leader( &self, meta: Self::Metadata, commitment: Option<CommitmentConfig>, ) -> Result<String> { debug!("get_slot_leader rpc request received"); Ok(meta.get_slot_leader(commitment)) } fn get_slot_leaders( &self, meta: Self::Metadata, start_slot: Slot, limit: u64, ) -> Result<Vec<String>> { debug!( "get_slot_leaders rpc request received (start: {} limit: {})", start_slot, limit ); let limit = limit as usize; if limit > MAX_GET_SLOT_LEADERS { return Err(Error::invalid_params(format!( "Invalid limit; max {}", MAX_GET_SLOT_LEADERS ))); } Ok(meta .get_slot_leaders(None, start_slot, limit)? .into_iter() .map(|identity| identity.to_string()) .collect()) } fn get_block_production( &self, meta: Self::Metadata, config: Option<RpcBlockProductionConfig>, ) -> Result<RpcResponse<RpcBlockProduction>> { debug!("get_block_production rpc request received"); let config = config.unwrap_or_default(); let filter_by_identity = if let Some(ref identity) = config.identity { Some(verify_pubkey(identity)?) } else { None }; let bank = meta.bank(config.commitment); let (first_slot, last_slot) = match config.range { None => ( bank.epoch_schedule().get_first_slot_in_epoch(bank.epoch()), bank.slot(), ), Some(range) => { let first_slot = range.first_slot; let last_slot = range.last_slot.unwrap_or_else(|| bank.slot()); if last_slot < first_slot { return Err(Error::invalid_params(format!( "lastSlot, {}, cannot be less than firstSlot, {}", last_slot, first_slot ))); } (first_slot, last_slot) } }; let slot_history = bank.get_slot_history(); if first_slot < slot_history.oldest() { return Err(Error::invalid_params(format!( "firstSlot, {}, is too small; min {}", first_slot, slot_history.oldest() ))); } if last_slot > slot_history.newest() { return Err(Error::invalid_params(format!( "lastSlot, {}, is too large; max {}", last_slot, slot_history.newest() ))); } let slot_leaders = meta.get_slot_leaders( config.commitment, first_slot, last_slot.saturating_sub(first_slot) as usize + 1, // +1 because last_slot is inclusive )?; let mut block_production: HashMap<_, (usize, usize)> = HashMap::new(); let mut slot = first_slot; for identity in slot_leaders { if let Some(ref filter_by_identity) = filter_by_identity { if identity != *filter_by_identity { slot += 1; continue; } } let mut entry = block_production.entry(identity).or_default(); if slot_history.check(slot) == solana_sdk::slot_history::Check::Found { entry.1 += 1; // Increment blocks_produced } entry.0 += 1; // Increment leader_slots slot += 1; } Ok(new_response( &bank, RpcBlockProduction { by_identity: block_production .into_iter() .map(|(k, v)| (k.to_string(), v)) .collect(), range: RpcBlockProductionRange { first_slot, last_slot, }, }, )) } } } // RPC interface that depends on AccountsDB // Expected to be provided by API nodes, but collected for easy separation and offloading to // accounts replica nodes in the future. 
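// For orientation, requests against the endpoints declared below are plain
// JSON-RPC 2.0 over HTTP; an illustrative payload (shape only, not tied to the
// tests in this module) might look like:
//
//     {"jsonrpc":"2.0","id":1,"method":"getAccountInfo",
//      "params":["<base58 pubkey>",{"encoding":"base64"}]}
//
// The `meta: Self::Metadata` parameter on each trait method is injected by
// jsonrpc_core and never appears on the wire.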
pub mod rpc_accounts { use super::*; #[rpc] pub trait AccountsData { type Metadata; #[rpc(meta, name = "getAccountInfo")] fn get_account_info( &self, meta: Self::Metadata, pubkey_str: String, config: Option<RpcAccountInfoConfig>, ) -> Result<RpcResponse<Option<UiAccount>>>; #[rpc(meta, name = "getMultipleAccounts")] fn get_multiple_accounts( &self, meta: Self::Metadata, pubkey_strs: Vec<String>, config: Option<RpcAccountInfoConfig>, ) -> Result<RpcResponse<Vec<Option<UiAccount>>>>; #[rpc(meta, name = "getProgramAccounts")] fn get_program_accounts( &self, meta: Self::Metadata, program_id_str: String, config: Option<RpcProgramAccountsConfig>, ) -> Result<OptionalContext<Vec<RpcKeyedAccount>>>; #[rpc(meta, name = "getBlockCommitment")] fn get_block_commitment( &self, meta: Self::Metadata, block: Slot, ) -> Result<RpcBlockCommitment<BlockCommitmentArray>>; #[rpc(meta, name = "getLargestAccounts")] fn get_largest_accounts( &self, meta: Self::Metadata, config: Option<RpcLargestAccountsConfig>, ) -> Result<RpcResponse<Vec<RpcAccountBalance>>>; #[rpc(meta, name = "getSupply")] fn get_supply( &self, meta: Self::Metadata, config: Option<RpcSupplyConfig>, ) -> Result<RpcResponse<RpcSupply>>; #[rpc(meta, name = "getStakeActivation")] fn get_stake_activation( &self, meta: Self::Metadata, pubkey_str: String, config: Option<RpcEpochConfig>, ) -> Result<RpcStakeActivation>; // SPL Token-specific RPC endpoints // See https://github.com/solana-labs/solana-program-library/releases/tag/token-v2.0.0 for // program details #[rpc(meta, name = "getTokenAccountBalance")] fn get_token_account_balance( &self, meta: Self::Metadata, pubkey_str: String, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<UiTokenAmount>>; #[rpc(meta, name = "getTokenSupply")] fn get_token_supply( &self, meta: Self::Metadata, mint_str: String, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<UiTokenAmount>>; #[rpc(meta, name = "getTokenLargestAccounts")] fn get_token_largest_accounts( &self, meta: Self::Metadata, mint_str: String, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<Vec<RpcTokenAccountBalance>>>; #[rpc(meta, name = "getTokenAccountsByOwner")] fn get_token_accounts_by_owner( &self, meta: Self::Metadata, owner_str: String, token_account_filter: RpcTokenAccountsFilter, config: Option<RpcAccountInfoConfig>, ) -> Result<RpcResponse<Vec<RpcKeyedAccount>>>; #[rpc(meta, name = "getTokenAccountsByDelegate")] fn get_token_accounts_by_delegate( &self, meta: Self::Metadata, delegate_str: String, token_account_filter: RpcTokenAccountsFilter, config: Option<RpcAccountInfoConfig>, ) -> Result<RpcResponse<Vec<RpcKeyedAccount>>>; } pub struct AccountsDataImpl; impl AccountsData for AccountsDataImpl { type Metadata = JsonRpcRequestProcessor; fn get_account_info( &self, meta: Self::Metadata, pubkey_str: String, config: Option<RpcAccountInfoConfig>, ) -> Result<RpcResponse<Option<UiAccount>>> { debug!("get_account_info rpc request received: {:?}", pubkey_str); let pubkey = verify_pubkey(&pubkey_str)?; meta.get_account_info(&pubkey, config) } fn get_multiple_accounts( &self, meta: Self::Metadata, pubkey_strs: Vec<String>, config: Option<RpcAccountInfoConfig>, ) -> Result<RpcResponse<Vec<Option<UiAccount>>>> { debug!( "get_multiple_accounts rpc request received: {:?}", pubkey_strs.len() ); let max_multiple_accounts = meta .config .max_multiple_accounts .unwrap_or(MAX_MULTIPLE_ACCOUNTS); if pubkey_strs.len() > max_multiple_accounts { return Err(Error::invalid_params(format!( "Too many inputs provided; max {}", 
max_multiple_accounts ))); } let pubkeys = pubkey_strs .into_iter() .map(|pubkey_str| verify_pubkey(&pubkey_str)) .collect::<Result<Vec<_>>>()?; meta.get_multiple_accounts(pubkeys, config) } fn get_program_accounts( &self, meta: Self::Metadata, program_id_str: String, config: Option<RpcProgramAccountsConfig>, ) -> Result<OptionalContext<Vec<RpcKeyedAccount>>> { debug!( "get_program_accounts rpc request received: {:?}", program_id_str ); let program_id = verify_pubkey(&program_id_str)?; let (config, filters, with_context) = if let Some(config) = config { ( Some(config.account_config), config.filters.unwrap_or_default(), config.with_context.unwrap_or_default(), ) } else { (None, vec![], false) }; if filters.len() > MAX_GET_PROGRAM_ACCOUNT_FILTERS { return Err(Error::invalid_params(format!( "Too many filters provided; max {}", MAX_GET_PROGRAM_ACCOUNT_FILTERS ))); } for filter in &filters { verify_filter(filter)?; } meta.get_program_accounts(&program_id, config, filters, with_context) } fn get_block_commitment( &self, meta: Self::Metadata, block: Slot, ) -> Result<RpcBlockCommitment<BlockCommitmentArray>> { debug!("get_block_commitment rpc request received"); Ok(meta.get_block_commitment(block)) } fn get_largest_accounts( &self, meta: Self::Metadata, config: Option<RpcLargestAccountsConfig>, ) -> Result<RpcResponse<Vec<RpcAccountBalance>>> { debug!("get_largest_accounts rpc request received"); Ok(meta.get_largest_accounts(config)?) } fn get_supply( &self, meta: Self::Metadata, config: Option<RpcSupplyConfig>, ) -> Result<RpcResponse<RpcSupply>> { debug!("get_supply rpc request received"); Ok(meta.get_supply(config)?) } fn get_stake_activation( &self, meta: Self::Metadata, pubkey_str: String, config: Option<RpcEpochConfig>, ) -> Result<RpcStakeActivation> { debug!( "get_stake_activation rpc request received: {:?}", pubkey_str ); let pubkey = verify_pubkey(&pubkey_str)?; meta.get_stake_activation(&pubkey, config) } fn get_token_account_balance( &self, meta: Self::Metadata, pubkey_str: String, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<UiTokenAmount>> { debug!( "get_token_account_balance rpc request received: {:?}", pubkey_str ); let pubkey = verify_pubkey(&pubkey_str)?; meta.get_token_account_balance(&pubkey, commitment) } fn get_token_supply( &self, meta: Self::Metadata, mint_str: String, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<UiTokenAmount>> { debug!("get_token_supply rpc request received: {:?}", mint_str); let mint = verify_pubkey(&mint_str)?; meta.get_token_supply(&mint, commitment) } fn get_token_largest_accounts( &self, meta: Self::Metadata, mint_str: String, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<Vec<RpcTokenAccountBalance>>> { debug!( "get_token_largest_accounts rpc request received: {:?}", mint_str ); let mint = verify_pubkey(&mint_str)?; meta.get_token_largest_accounts(&mint, commitment) } fn get_token_accounts_by_owner( &self, meta: Self::Metadata, owner_str: String, token_account_filter: RpcTokenAccountsFilter, config: Option<RpcAccountInfoConfig>, ) -> Result<RpcResponse<Vec<RpcKeyedAccount>>> { debug!( "get_token_accounts_by_owner rpc request received: {:?}", owner_str ); let owner = verify_pubkey(&owner_str)?; let token_account_filter = verify_token_account_filter(token_account_filter)?; meta.get_token_accounts_by_owner(&owner, token_account_filter, config) } fn get_token_accounts_by_delegate( &self, meta: Self::Metadata, delegate_str: String, token_account_filter: RpcTokenAccountsFilter, config: 
Option<RpcAccountInfoConfig>, ) -> Result<RpcResponse<Vec<RpcKeyedAccount>>> { debug!( "get_token_accounts_by_delegate rpc request received: {:?}", delegate_str ); let delegate = verify_pubkey(&delegate_str)?; let token_account_filter = verify_token_account_filter(token_account_filter)?; meta.get_token_accounts_by_delegate(&delegate, token_account_filter, config) } } } // Full RPC interface that an API node is expected to provide // (rpc_minimal should also be provided by an API node) pub mod rpc_full { use super::*; #[rpc] pub trait Full { type Metadata; #[rpc(meta, name = "getInflationReward")] fn get_inflation_reward( &self, meta: Self::Metadata, address_strs: Vec<String>, config: Option<RpcEpochConfig>, ) -> BoxFuture<Result<Vec<Option<RpcInflationReward>>>>; #[rpc(meta, name = "getClusterNodes")] fn get_cluster_nodes(&self, meta: Self::Metadata) -> Result<Vec<RpcContactInfo>>; #[rpc(meta, name = "getRecentPerformanceSamples")] fn get_recent_performance_samples( &self, meta: Self::Metadata, limit: Option<usize>, ) -> Result<Vec<RpcPerfSample>>; #[rpc(meta, name = "getGenesisHash")] fn get_genesis_hash(&self, meta: Self::Metadata) -> Result<String>; #[rpc(meta, name = "getSignatureStatuses")] fn get_signature_statuses( &self, meta: Self::Metadata, signature_strs: Vec<String>, config: Option<RpcSignatureStatusConfig>, ) -> BoxFuture<Result<RpcResponse<Vec<Option<TransactionStatus>>>>>; #[rpc(meta, name = "getMaxRetransmitSlot")] fn get_max_retransmit_slot(&self, meta: Self::Metadata) -> Result<Slot>; #[rpc(meta, name = "getMaxShredInsertSlot")] fn get_max_shred_insert_slot(&self, meta: Self::Metadata) -> Result<Slot>; #[rpc(meta, name = "requestAirdrop")] fn request_airdrop( &self, meta: Self::Metadata, pubkey_str: String, lamports: u64, config: Option<RpcRequestAirdropConfig>, ) -> Result<String>; #[rpc(meta, name = "sendTransaction")] fn send_transaction( &self, meta: Self::Metadata, data: String, config: Option<RpcSendTransactionConfig>, ) -> Result<String>; #[rpc(meta, name = "simulateTransaction")] fn simulate_transaction( &self, meta: Self::Metadata, data: String, config: Option<RpcSimulateTransactionConfig>, ) -> Result<RpcResponse<RpcSimulateTransactionResult>>; #[rpc(meta, name = "minimumLedgerSlot")] fn minimum_ledger_slot(&self, meta: Self::Metadata) -> Result<Slot>; #[rpc(meta, name = "getBlock")] fn get_block( &self, meta: Self::Metadata, slot: Slot, config: Option<RpcEncodingConfigWrapper<RpcBlockConfig>>, ) -> BoxFuture<Result<Option<UiConfirmedBlock>>>; #[rpc(meta, name = "getBlockTime")] fn get_block_time( &self, meta: Self::Metadata, slot: Slot, ) -> BoxFuture<Result<Option<UnixTimestamp>>>; #[rpc(meta, name = "getBlocks")] fn get_blocks( &self, meta: Self::Metadata, start_slot: Slot, config: Option<RpcBlocksConfigWrapper>, commitment: Option<CommitmentConfig>, ) -> BoxFuture<Result<Vec<Slot>>>; #[rpc(meta, name = "getBlocksWithLimit")] fn get_blocks_with_limit( &self, meta: Self::Metadata, start_slot: Slot, limit: usize, commitment: Option<CommitmentConfig>, ) -> BoxFuture<Result<Vec<Slot>>>; #[rpc(meta, name = "getTransaction")] fn get_transaction( &self, meta: Self::Metadata, signature_str: String, config: Option<RpcEncodingConfigWrapper<RpcTransactionConfig>>, ) -> BoxFuture<Result<Option<EncodedConfirmedTransaction>>>; #[rpc(meta, name = "getSignaturesForAddress")] fn get_signatures_for_address( &self, meta: Self::Metadata, address: String, config: Option<RpcSignaturesForAddressConfig>, ) -> BoxFuture<Result<Vec<RpcConfirmedTransactionStatusWithSignature>>>; 
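        // Note: blockstore/Bigtable-backed lookups (getBlock, getTransaction,
        // getSignaturesForAddress above) return BoxFuture and are served
        // asynchronously, while bank-only queries return a plain Result. A
        // hypothetical new method touching long-term storage would follow the
        // same pattern ("getFoo"/`Foo` are placeholders, not real API surface):
        //
        //     #[rpc(meta, name = "getFoo")]
        //     fn get_foo(&self, meta: Self::Metadata, slot: Slot)
        //         -> BoxFuture<Result<Option<Foo>>>;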
#[rpc(meta, name = "getFirstAvailableBlock")] fn get_first_available_block(&self, meta: Self::Metadata) -> BoxFuture<Result<Slot>>; #[rpc(meta, name = "getLatestBlockhash")] fn get_latest_blockhash( &self, meta: Self::Metadata, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<RpcBlockhash>>; #[rpc(meta, name = "isBlockhashValid")] fn is_blockhash_valid( &self, meta: Self::Metadata, blockhash: String, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<bool>>; #[rpc(meta, name = "getFeeForMessage")] fn get_fee_for_message( &self, meta: Self::Metadata, data: String, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<Option<u64>>>; } pub struct FullImpl; impl Full for FullImpl { type Metadata = JsonRpcRequestProcessor; fn get_recent_performance_samples( &self, meta: Self::Metadata, limit: Option<usize>, ) -> Result<Vec<RpcPerfSample>> { debug!("get_recent_performance_samples request received"); let limit = limit.unwrap_or(PERFORMANCE_SAMPLES_LIMIT); if limit > PERFORMANCE_SAMPLES_LIMIT { return Err(Error::invalid_params(format!( "Invalid limit; max {}", PERFORMANCE_SAMPLES_LIMIT ))); } Ok(meta .blockstore .get_recent_perf_samples(limit) .map_err(|err| { warn!("get_recent_performance_samples failed: {:?}", err); Error::invalid_request() })? .iter() .map(|(slot, sample)| RpcPerfSample { slot: *slot, num_transactions: sample.num_transactions, num_slots: sample.num_slots, sample_period_secs: sample.sample_period_secs, }) .collect()) } fn get_cluster_nodes(&self, meta: Self::Metadata) -> Result<Vec<RpcContactInfo>> { debug!("get_cluster_nodes rpc request received"); let cluster_info = &meta.cluster_info; let socket_addr_space = cluster_info.socket_addr_space(); let valid_address_or_none = |addr: &SocketAddr| -> Option<SocketAddr> { if ContactInfo::is_valid_address(addr, socket_addr_space) { Some(*addr) } else { None } }; let my_shred_version = cluster_info.my_shred_version(); Ok(cluster_info .all_peers() .iter() .filter_map(|(contact_info, _)| { if my_shred_version == contact_info.shred_version && ContactInfo::is_valid_address(&contact_info.gossip, socket_addr_space) { let (version, feature_set) = if let Some(version) = cluster_info.get_node_version(&contact_info.id) { (Some(version.to_string()), Some(version.feature_set)) } else { (None, None) }; Some(RpcContactInfo { pubkey: contact_info.id.to_string(), gossip: Some(contact_info.gossip), tpu: valid_address_or_none(&contact_info.tpu), rpc: valid_address_or_none(&contact_info.rpc), version, feature_set, shred_version: Some(my_shred_version), }) } else { None // Exclude spy nodes } }) .collect()) } fn get_genesis_hash(&self, meta: Self::Metadata) -> Result<String> { debug!("get_genesis_hash rpc request received"); Ok(meta.genesis_hash.to_string()) } fn get_signature_statuses( &self, meta: Self::Metadata, signature_strs: Vec<String>, config: Option<RpcSignatureStatusConfig>, ) -> BoxFuture<Result<RpcResponse<Vec<Option<TransactionStatus>>>>> { debug!( "get_signature_statuses rpc request received: {:?}", signature_strs.len() ); if signature_strs.len() > MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS { return Box::pin(future::err(Error::invalid_params(format!( "Too many inputs provided; max {}", MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS )))); } let mut signatures: Vec<Signature> = vec![]; for signature_str in signature_strs { match verify_signature(&signature_str) { Ok(signature) => { signatures.push(signature); } Err(err) => return Box::pin(future::err(err)), } } Box::pin(async move { meta.get_signature_statuses(signatures, 
config).await }) } fn get_max_retransmit_slot(&self, meta: Self::Metadata) -> Result<Slot> { debug!("get_max_retransmit_slot rpc request received"); Ok(meta.get_max_retransmit_slot()) } fn get_max_shred_insert_slot(&self, meta: Self::Metadata) -> Result<Slot> { debug!("get_max_shred_insert_slot rpc request received"); Ok(meta.get_max_shred_insert_slot()) } fn request_airdrop( &self, meta: Self::Metadata, pubkey_str: String, lamports: u64, config: Option<RpcRequestAirdropConfig>, ) -> Result<String> { debug!("request_airdrop rpc request received"); trace!( "request_airdrop id={} lamports={} config: {:?}", pubkey_str, lamports, &config ); let faucet_addr = meta.config.faucet_addr.ok_or_else(Error::invalid_request)?; let pubkey = verify_pubkey(&pubkey_str)?; let config = config.unwrap_or_default(); let bank = meta.bank(config.commitment); let blockhash = if let Some(blockhash) = config.recent_blockhash { verify_hash(&blockhash)? } else { bank.confirmed_last_blockhash() }; let last_valid_block_height = bank .get_blockhash_last_valid_block_height(&blockhash) .unwrap_or(0); let transaction = request_airdrop_transaction(&faucet_addr, &pubkey, lamports, blockhash).map_err( |err| { info!("request_airdrop_transaction failed: {:?}", err); Error::internal_error() }, )?; let wire_transaction = serialize(&transaction).map_err(|err| { info!("request_airdrop: serialize error: {:?}", err); Error::internal_error() })?; let signature = if !transaction.signatures.is_empty() { transaction.signatures[0] } else { return Err(RpcCustomError::TransactionSignatureVerificationFailure.into()); }; _send_transaction( meta, signature, wire_transaction, last_valid_block_height, None, None, ) } fn send_transaction( &self, meta: Self::Metadata, data: String, config: Option<RpcSendTransactionConfig>, ) -> Result<String> { debug!("send_transaction rpc request received"); let config = config.unwrap_or_default(); let encoding = config.encoding.unwrap_or(UiTransactionEncoding::Base58); let (wire_transaction, unsanitized_tx) = decode_and_deserialize::<VersionedTransaction>(data, encoding)?; let preflight_commitment = config .preflight_commitment .map(|commitment| CommitmentConfig { commitment }); let preflight_bank = &*meta.bank(preflight_commitment); let transaction = sanitize_transaction(unsanitized_tx)?; let signature = *transaction.signature(); let mut last_valid_block_height = preflight_bank .get_blockhash_last_valid_block_height(transaction.message().recent_blockhash()) .unwrap_or(0); let durable_nonce_info = transaction .get_durable_nonce( preflight_bank .feature_set .is_active(&nonce_must_be_writable::id()), ) .map(|&pubkey| (pubkey, *transaction.message().recent_blockhash())); if durable_nonce_info.is_some() { // While it uses a defined constant, this last_valid_block_height value is chosen arbitrarily. // It provides a fallback timeout for durable-nonce transaction retries in case of // malicious packing of the retry queue. Durable-nonce transactions are otherwise // retried until the nonce is advanced. 
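                // Sketch of the assignment below: with the bank at block height H,
                // retries are cut off around H + MAX_RECENT_BLOCKHASHES (300 in
                // solana-sdk at the time of writing), e.g. H = 100_000 gives a
                // last_valid_block_height of 100_300.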
last_valid_block_height = preflight_bank.block_height() + MAX_RECENT_BLOCKHASHES as u64; } if !config.skip_preflight { if let Err(e) = verify_transaction(&transaction, &preflight_bank.feature_set) { return Err(e); } match meta.health.check() { RpcHealthStatus::Ok => (), RpcHealthStatus::Unknown => { inc_new_counter_info!("rpc-send-tx_health-unknown", 1); return Err(RpcCustomError::NodeUnhealthy { num_slots_behind: None, } .into()); } RpcHealthStatus::Behind { num_slots } => { inc_new_counter_info!("rpc-send-tx_health-behind", 1); return Err(RpcCustomError::NodeUnhealthy { num_slots_behind: Some(num_slots), } .into()); } } if let TransactionSimulationResult { result: Err(err), logs, post_simulation_accounts: _, units_consumed, } = preflight_bank.simulate_transaction(transaction) { match err { TransactionError::BlockhashNotFound => { inc_new_counter_info!("rpc-send-tx_err-blockhash-not-found", 1); } _ => { inc_new_counter_info!("rpc-send-tx_err-other", 1); } } return Err(RpcCustomError::SendTransactionPreflightFailure { message: format!("Transaction simulation failed: {}", err), result: RpcSimulateTransactionResult { err: Some(err), logs: Some(logs), accounts: None, units_consumed: Some(units_consumed), }, } .into()); } } _send_transaction( meta, signature, wire_transaction, last_valid_block_height, durable_nonce_info, config.max_retries, ) } fn simulate_transaction( &self, meta: Self::Metadata, data: String, config: Option<RpcSimulateTransactionConfig>, ) -> Result<RpcResponse<RpcSimulateTransactionResult>> { debug!("simulate_transaction rpc request received"); let config = config.unwrap_or_default(); let encoding = config.encoding.unwrap_or(UiTransactionEncoding::Base58); let (_, mut unsanitized_tx) = decode_and_deserialize::<VersionedTransaction>(data, encoding)?; let bank = &*meta.bank(config.commitment); if config.replace_recent_blockhash { if config.sig_verify { return Err(Error::invalid_params( "sigVerify may not be used with replaceRecentBlockhash", )); } unsanitized_tx .message .set_recent_blockhash(bank.last_blockhash()); } let transaction = sanitize_transaction(unsanitized_tx)?; if config.sig_verify { verify_transaction(&transaction, &bank.feature_set)?; } let TransactionSimulationResult { result, logs, post_simulation_accounts, units_consumed, } = bank.simulate_transaction(transaction); let accounts = if let Some(config_accounts) = config.accounts { let accounts_encoding = config_accounts .encoding .unwrap_or(UiAccountEncoding::Base64); if accounts_encoding == UiAccountEncoding::Binary || accounts_encoding == UiAccountEncoding::Base58 { return Err(Error::invalid_params("base58 encoding not supported")); } if config_accounts.addresses.len() > post_simulation_accounts.len() { return Err(Error::invalid_params(format!( "Too many accounts provided; max {}", post_simulation_accounts.len() ))); } let mut accounts = vec![]; for address_str in config_accounts.addresses { let address = verify_pubkey(&address_str)?; accounts.push(if result.is_err() { None } else { post_simulation_accounts .iter() .find(|(key, _account)| key == &address) .map(|(pubkey, account)| { UiAccount::encode(pubkey, account, accounts_encoding, None, None) }) }); } Some(accounts) } else { None }; Ok(new_response( bank, RpcSimulateTransactionResult { err: result.err(), logs: Some(logs), accounts, units_consumed: Some(units_consumed), }, )) } fn minimum_ledger_slot(&self, meta: Self::Metadata) -> Result<Slot> { debug!("minimum_ledger_slot rpc request received"); meta.minimum_ledger_slot() } fn get_block( &self, meta: 
Self::Metadata, slot: Slot, config: Option<RpcEncodingConfigWrapper<RpcBlockConfig>>, ) -> BoxFuture<Result<Option<UiConfirmedBlock>>> { debug!("get_block rpc request received: {:?}", slot); Box::pin(async move { meta.get_block(slot, config).await }) } fn get_blocks( &self, meta: Self::Metadata, start_slot: Slot, config: Option<RpcBlocksConfigWrapper>, commitment: Option<CommitmentConfig>, ) -> BoxFuture<Result<Vec<Slot>>> { let (end_slot, maybe_commitment) = config.map(|config| config.unzip()).unwrap_or_default(); debug!( "get_blocks rpc request received: {}-{:?}", start_slot, end_slot ); Box::pin(async move { meta.get_blocks(start_slot, end_slot, commitment.or(maybe_commitment)) .await }) } fn get_blocks_with_limit( &self, meta: Self::Metadata, start_slot: Slot, limit: usize, commitment: Option<CommitmentConfig>, ) -> BoxFuture<Result<Vec<Slot>>> { debug!( "get_blocks_with_limit rpc request received: {}-{}", start_slot, limit, ); Box::pin(async move { meta.get_blocks_with_limit(start_slot, limit, commitment) .await }) } fn get_block_time( &self, meta: Self::Metadata, slot: Slot, ) -> BoxFuture<Result<Option<UnixTimestamp>>> { Box::pin(async move { meta.get_block_time(slot).await }) } fn get_transaction( &self, meta: Self::Metadata, signature_str: String, config: Option<RpcEncodingConfigWrapper<RpcTransactionConfig>>, ) -> BoxFuture<Result<Option<EncodedConfirmedTransaction>>> { debug!("get_transaction rpc request received: {:?}", signature_str); let signature = verify_signature(&signature_str); if let Err(err) = signature { return Box::pin(future::err(err)); } Box::pin(async move { meta.get_transaction(signature.unwrap(), config).await }) } fn get_signatures_for_address( &self, meta: Self::Metadata, address: String, config: Option<RpcSignaturesForAddressConfig>, ) -> BoxFuture<Result<Vec<RpcConfirmedTransactionStatusWithSignature>>> { let config = config.unwrap_or_default(); let commitment = config.commitment; let verification = verify_and_parse_signatures_for_address_params( address, config.before, config.until, config.limit, ); match verification { Err(err) => Box::pin(future::err(err)), Ok((address, before, until, limit)) => Box::pin(async move { meta.get_signatures_for_address(address, before, until, limit, commitment) .await }), } } fn get_first_available_block(&self, meta: Self::Metadata) -> BoxFuture<Result<Slot>> { debug!("get_first_available_block rpc request received"); Box::pin(async move { Ok(meta.get_first_available_block().await) }) } fn get_inflation_reward( &self, meta: Self::Metadata, address_strs: Vec<String>, config: Option<RpcEpochConfig>, ) -> BoxFuture<Result<Vec<Option<RpcInflationReward>>>> { debug!( "get_inflation_reward rpc request received: {:?}", address_strs.len() ); let mut addresses: Vec<Pubkey> = vec![]; for address_str in address_strs { match verify_pubkey(&address_str) { Ok(pubkey) => { addresses.push(pubkey); } Err(err) => return Box::pin(future::err(err)), } } Box::pin(async move { meta.get_inflation_reward(addresses, config).await }) } fn get_latest_blockhash( &self, meta: Self::Metadata, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<RpcBlockhash>> { debug!("get_latest_blockhash rpc request received"); Ok(meta.get_latest_blockhash(commitment)) } fn is_blockhash_valid( &self, meta: Self::Metadata, blockhash: String, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<bool>> { let blockhash = Hash::from_str(&blockhash) .map_err(|e| Error::invalid_params(format!("{:?}", e)))?; Ok(meta.is_blockhash_valid(&blockhash, commitment)) 
        }

        fn get_fee_for_message(
            &self,
            meta: Self::Metadata,
            data: String,
            commitment: Option<CommitmentConfig>,
        ) -> Result<RpcResponse<Option<u64>>> {
            debug!("get_fee_for_message rpc request received");
            let (_, message) =
                decode_and_deserialize::<Message>(data, UiTransactionEncoding::Base64)?;
            let sanitized_message = SanitizedMessage::try_from(message).map_err(|err| {
                Error::invalid_params(format!("invalid transaction message: {}", err))
            })?;
            Ok(meta.get_fee_for_message(&sanitized_message, commitment))
        }
    }
}

// RPC methods deprecated in v1.9
pub mod rpc_deprecated_v1_9 {
    #![allow(deprecated)]
    use super::*;
    #[rpc]
    pub trait DeprecatedV1_9 {
        type Metadata;

        #[rpc(meta, name = "getRecentBlockhash")]
        fn get_recent_blockhash(
            &self,
            meta: Self::Metadata,
            commitment: Option<CommitmentConfig>,
        ) -> Result<RpcResponse<RpcBlockhashFeeCalculator>>;

        #[rpc(meta, name = "getFees")]
        fn get_fees(
            &self,
            meta: Self::Metadata,
            commitment: Option<CommitmentConfig>,
        ) -> Result<RpcResponse<RpcFees>>;

        #[rpc(meta, name = "getFeeCalculatorForBlockhash")]
        fn get_fee_calculator_for_blockhash(
            &self,
            meta: Self::Metadata,
            blockhash: String,
            commitment: Option<CommitmentConfig>,
        ) -> Result<RpcResponse<Option<RpcFeeCalculator>>>;

        #[rpc(meta, name = "getFeeRateGovernor")]
        fn get_fee_rate_governor(
            &self,
            meta: Self::Metadata,
        ) -> Result<RpcResponse<RpcFeeRateGovernor>>;

        #[rpc(meta, name = "getSnapshotSlot")]
        fn get_snapshot_slot(&self, meta: Self::Metadata) -> Result<Slot>;
    }

    pub struct DeprecatedV1_9Impl;
    impl DeprecatedV1_9 for DeprecatedV1_9Impl {
        type Metadata = JsonRpcRequestProcessor;

        fn get_recent_blockhash(
            &self,
            meta: Self::Metadata,
            commitment: Option<CommitmentConfig>,
        ) -> Result<RpcResponse<RpcBlockhashFeeCalculator>> {
            debug!("get_recent_blockhash rpc request received");
            Ok(meta.get_recent_blockhash(commitment))
        }

        fn get_fees(
            &self,
            meta: Self::Metadata,
            commitment: Option<CommitmentConfig>,
        ) -> Result<RpcResponse<RpcFees>> {
            debug!("get_fees rpc request received");
            Ok(meta.get_fees(commitment))
        }

        fn get_fee_calculator_for_blockhash(
            &self,
            meta: Self::Metadata,
            blockhash: String,
            commitment: Option<CommitmentConfig>,
        ) -> Result<RpcResponse<Option<RpcFeeCalculator>>> {
            debug!("get_fee_calculator_for_blockhash rpc request received");
            let blockhash = Hash::from_str(&blockhash)
                .map_err(|e| Error::invalid_params(format!("{:?}", e)))?;
            Ok(meta.get_fee_calculator_for_blockhash(&blockhash, commitment))
        }

        fn get_fee_rate_governor(
            &self,
            meta: Self::Metadata,
        ) -> Result<RpcResponse<RpcFeeRateGovernor>> {
            debug!("get_fee_rate_governor rpc request received");
            Ok(meta.get_fee_rate_governor())
        }

        fn get_snapshot_slot(&self, meta: Self::Metadata) -> Result<Slot> {
            debug!("get_snapshot_slot rpc request received");
            meta.snapshot_config
                .and_then(|snapshot_config| {
                    snapshot_utils::get_highest_full_snapshot_archive_slot(
                        &snapshot_config.snapshot_archives_dir,
                    )
                })
                .ok_or_else(|| RpcCustomError::NoSnapshot.into())
        }
    }
}

// RPC methods deprecated in v1.7
pub mod rpc_deprecated_v1_7 {
    #![allow(deprecated)]
    use super::*;
    #[rpc]
    pub trait DeprecatedV1_7 {
        type Metadata;

        // DEPRECATED
        #[rpc(meta, name = "getConfirmedBlock")]
        fn get_confirmed_block(
            &self,
            meta: Self::Metadata,
            slot: Slot,
            config: Option<RpcEncodingConfigWrapper<RpcConfirmedBlockConfig>>,
        ) -> BoxFuture<Result<Option<UiConfirmedBlock>>>;

        // DEPRECATED
        #[rpc(meta, name = "getConfirmedBlocks")]
        fn get_confirmed_blocks(
            &self,
            meta: Self::Metadata,
            start_slot: Slot,
            config: Option<RpcConfirmedBlocksConfigWrapper>,
            commitment: Option<CommitmentConfig>,
        ) ->
BoxFuture<Result<Vec<Slot>>>; // DEPRECATED #[rpc(meta, name = "getConfirmedBlocksWithLimit")] fn get_confirmed_blocks_with_limit( &self, meta: Self::Metadata, start_slot: Slot, limit: usize, commitment: Option<CommitmentConfig>, ) -> BoxFuture<Result<Vec<Slot>>>; // DEPRECATED #[rpc(meta, name = "getConfirmedTransaction")] fn get_confirmed_transaction( &self, meta: Self::Metadata, signature_str: String, config: Option<RpcEncodingConfigWrapper<RpcConfirmedTransactionConfig>>, ) -> BoxFuture<Result<Option<EncodedConfirmedTransaction>>>; // DEPRECATED #[rpc(meta, name = "getConfirmedSignaturesForAddress2")] fn get_confirmed_signatures_for_address2( &self, meta: Self::Metadata, address: String, config: Option<RpcGetConfirmedSignaturesForAddress2Config>, ) -> BoxFuture<Result<Vec<RpcConfirmedTransactionStatusWithSignature>>>; } pub struct DeprecatedV1_7Impl; impl DeprecatedV1_7 for DeprecatedV1_7Impl { type Metadata = JsonRpcRequestProcessor; fn get_confirmed_block( &self, meta: Self::Metadata, slot: Slot, config: Option<RpcEncodingConfigWrapper<RpcConfirmedBlockConfig>>, ) -> BoxFuture<Result<Option<UiConfirmedBlock>>> { debug!("get_confirmed_block rpc request received: {:?}", slot); Box::pin(async move { meta.get_block(slot, config.map(|config| config.convert())) .await }) } fn get_confirmed_blocks( &self, meta: Self::Metadata, start_slot: Slot, config: Option<RpcConfirmedBlocksConfigWrapper>, commitment: Option<CommitmentConfig>, ) -> BoxFuture<Result<Vec<Slot>>> { let (end_slot, maybe_commitment) = config.map(|config| config.unzip()).unwrap_or_default(); debug!( "get_confirmed_blocks rpc request received: {}-{:?}", start_slot, end_slot ); Box::pin(async move { meta.get_blocks(start_slot, end_slot, commitment.or(maybe_commitment)) .await }) } fn get_confirmed_blocks_with_limit( &self, meta: Self::Metadata, start_slot: Slot, limit: usize, commitment: Option<CommitmentConfig>, ) -> BoxFuture<Result<Vec<Slot>>> { debug!( "get_confirmed_blocks_with_limit rpc request received: {}-{}", start_slot, limit, ); Box::pin(async move { meta.get_blocks_with_limit(start_slot, limit, commitment) .await }) } fn get_confirmed_transaction( &self, meta: Self::Metadata, signature_str: String, config: Option<RpcEncodingConfigWrapper<RpcConfirmedTransactionConfig>>, ) -> BoxFuture<Result<Option<EncodedConfirmedTransaction>>> { debug!( "get_confirmed_transaction rpc request received: {:?}", signature_str ); let signature = verify_signature(&signature_str); if let Err(err) = signature { return Box::pin(future::err(err)); } Box::pin(async move { meta.get_transaction(signature.unwrap(), config.map(|config| config.convert())) .await }) } fn get_confirmed_signatures_for_address2( &self, meta: Self::Metadata, address: String, config: Option<RpcGetConfirmedSignaturesForAddress2Config>, ) -> BoxFuture<Result<Vec<RpcConfirmedTransactionStatusWithSignature>>> { let config = config.unwrap_or_default(); let commitment = config.commitment; let verification = verify_and_parse_signatures_for_address_params( address, config.before, config.until, config.limit, ); match verification { Err(err) => Box::pin(future::err(err)), Ok((address, before, until, limit)) => Box::pin(async move { meta.get_signatures_for_address(address, before, until, limit, commitment) .await }), } } } } // Obsolete RPC methods, collected for easy deactivation and removal pub mod rpc_obsolete_v1_7 { use super::*; #[rpc] pub trait ObsoleteV1_7 { type Metadata; // DEPRECATED #[rpc(meta, name = "confirmTransaction")] fn confirm_transaction( &self, meta: 
Self::Metadata, signature_str: String, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<bool>>; // DEPRECATED #[rpc(meta, name = "getSignatureStatus")] fn get_signature_status( &self, meta: Self::Metadata, signature_str: String, commitment: Option<CommitmentConfig>, ) -> Result<Option<transaction::Result<()>>>; // DEPRECATED (used by Trust Wallet) #[rpc(meta, name = "getSignatureConfirmation")] fn get_signature_confirmation( &self, meta: Self::Metadata, signature_str: String, commitment: Option<CommitmentConfig>, ) -> Result<Option<RpcSignatureConfirmation>>; // DEPRECATED #[rpc(meta, name = "getTotalSupply")] fn get_total_supply( &self, meta: Self::Metadata, commitment: Option<CommitmentConfig>, ) -> Result<u64>; // DEPRECATED #[rpc(meta, name = "getConfirmedSignaturesForAddress")] fn get_confirmed_signatures_for_address( &self, meta: Self::Metadata, pubkey_str: String, start_slot: Slot, end_slot: Slot, ) -> Result<Vec<String>>; } pub struct ObsoleteV1_7Impl; impl ObsoleteV1_7 for ObsoleteV1_7Impl { type Metadata = JsonRpcRequestProcessor; fn confirm_transaction( &self, meta: Self::Metadata, id: String, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<bool>> { debug!("confirm_transaction rpc request received: {:?}", id); let signature = verify_signature(&id)?; Ok(meta.confirm_transaction(&signature, commitment)) } fn get_signature_status( &self, meta: Self::Metadata, signature_str: String, commitment: Option<CommitmentConfig>, ) -> Result<Option<transaction::Result<()>>> { debug!( "get_signature_status rpc request received: {:?}", signature_str ); let signature = verify_signature(&signature_str)?; Ok(meta.get_signature_status(signature, commitment)) } fn get_signature_confirmation( &self, meta: Self::Metadata, signature_str: String, commitment: Option<CommitmentConfig>, ) -> Result<Option<RpcSignatureConfirmation>> { debug!( "get_signature_confirmation rpc request received: {:?}", signature_str ); let signature = verify_signature(&signature_str)?; Ok(meta.get_signature_confirmation_status(signature, commitment)) } fn get_total_supply( &self, meta: Self::Metadata, commitment: Option<CommitmentConfig>, ) -> Result<u64> { debug!("get_total_supply rpc request received"); Ok(meta.get_total_supply(commitment)) } fn get_confirmed_signatures_for_address( &self, meta: Self::Metadata, pubkey_str: String, start_slot: Slot, end_slot: Slot, ) -> Result<Vec<String>> { debug!( "get_confirmed_signatures_for_address rpc request received: {:?} {:?}-{:?}", pubkey_str, start_slot, end_slot ); let pubkey = verify_pubkey(&pubkey_str)?; if end_slot < start_slot { return Err(Error::invalid_params(format!( "start_slot {} must be less than or equal to end_slot {}", start_slot, end_slot ))); } if end_slot - start_slot > MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE { return Err(Error::invalid_params(format!( "Slot range too large; max {}", MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE ))); } Ok(meta .get_confirmed_signatures_for_address(pubkey, start_slot, end_slot) .iter() .map(|signature| signature.to_string()) .collect()) } } } const MAX_BASE58_SIZE: usize = 1683; // Golden, bump if PACKET_DATA_SIZE changes const MAX_BASE64_SIZE: usize = 1644; // Golden, bump if PACKET_DATA_SIZE changes fn decode_and_deserialize<T>( encoded: String, encoding: UiTransactionEncoding, ) -> Result<(Vec<u8>, T)> where T: serde::de::DeserializeOwned, { let wire_output = match encoding { UiTransactionEncoding::Base58 => { inc_new_counter_info!("rpc-base58_encoded_tx", 1); if encoded.len() > 
            if encoded.len() > MAX_BASE58_SIZE {
                return Err(Error::invalid_params(format!(
                    "encoded {} too large: {} bytes (max: encoded/raw {}/{})",
                    type_name::<T>(),
                    encoded.len(),
                    MAX_BASE58_SIZE,
                    PACKET_DATA_SIZE,
                )));
            }
            bs58::decode(encoded)
                .into_vec()
                .map_err(|e| Error::invalid_params(format!("{:?}", e)))?
        }
        UiTransactionEncoding::Base64 => {
            inc_new_counter_info!("rpc-base64_encoded_tx", 1);
            if encoded.len() > MAX_BASE64_SIZE {
                return Err(Error::invalid_params(format!(
                    "encoded {} too large: {} bytes (max: encoded/raw {}/{})",
                    type_name::<T>(),
                    encoded.len(),
                    MAX_BASE64_SIZE,
                    PACKET_DATA_SIZE,
                )));
            }
            base64::decode(encoded).map_err(|e| Error::invalid_params(format!("{:?}", e)))?
        }
        _ => {
            return Err(Error::invalid_params(format!(
                "unsupported encoding: {}. Supported encodings: base58, base64",
                encoding
            )))
        }
    };
    if wire_output.len() > PACKET_DATA_SIZE {
        let err = format!(
            "encoded {} too large: {} bytes (max: {} bytes)",
            type_name::<T>(),
            wire_output.len(),
            PACKET_DATA_SIZE
        );
        info!("{}", err);
        return Err(Error::invalid_params(&err));
    }
    bincode::options()
        .with_limit(PACKET_DATA_SIZE as u64)
        .with_fixint_encoding()
        .allow_trailing_bytes()
        .deserialize_from(&wire_output[..])
        .map_err(|err| {
            info!("deserialize error: {}", err);
            Error::invalid_params(&err.to_string())
        })
        .map(|output| (wire_output, output))
}

fn sanitize_transaction(transaction: VersionedTransaction) -> Result<SanitizedTransaction> {
    let message_hash = transaction.message.hash();
    SanitizedTransaction::try_create(transaction, message_hash, None, |_| {
        Err(TransactionError::UnsupportedVersion)
    })
    .map_err(|err| Error::invalid_params(format!("invalid transaction: {}", err)))
}

pub(crate) fn create_validator_exit(exit: &Arc<AtomicBool>) -> Arc<RwLock<Exit>> {
    let mut validator_exit = Exit::default();
    let exit_ = exit.clone();
    validator_exit.register_exit(Box::new(move || exit_.store(true, Ordering::Relaxed)));
    Arc::new(RwLock::new(validator_exit))
}

// Used for tests
pub fn create_test_transactions_and_populate_blockstore(
    keypairs: Vec<&Keypair>,
    previous_slot: Slot,
    bank: Arc<Bank>,
    blockstore: Arc<Blockstore>,
    max_complete_transaction_status_slot: Arc<AtomicU64>,
) -> Vec<Signature> {
    let mint_keypair = keypairs[0];
    let keypair1 = keypairs[1];
    let keypair2 = keypairs[2];
    let keypair3 = keypairs[3];
    let slot = bank.slot();
    let blockhash = bank.confirmed_last_blockhash();

    // Generate transactions for processing
    // Successful transaction
    let success_tx =
        solana_sdk::system_transaction::transfer(mint_keypair, &keypair1.pubkey(), 2, blockhash);
    let success_signature = success_tx.signatures[0];
    let entry_1 = solana_entry::entry::next_entry(&blockhash, 1, vec![success_tx]);
    // Failed transaction, InstructionError
    let ix_error_tx =
        solana_sdk::system_transaction::transfer(keypair2, &keypair3.pubkey(), 10, blockhash);
    let ix_error_signature = ix_error_tx.signatures[0];
    let entry_2 = solana_entry::entry::next_entry(&entry_1.hash, 1, vec![ix_error_tx]);
    // Failed transaction, BlockhashNotFound (never committed, so no signature returned)
    let fail_tx = solana_sdk::system_transaction::transfer(
        mint_keypair,
        &keypair2.pubkey(),
        2,
        Hash::default(),
    );
    let entry_3 = solana_entry::entry::next_entry(&entry_2.hash, 1, vec![fail_tx]);
    let entries = vec![entry_1, entry_2, entry_3];

    let shreds = solana_ledger::blockstore::entries_to_test_shreds(
        entries.clone(),
        slot,
        previous_slot,
        true,
        0,
    );
    blockstore.insert_shreds(shreds, None, false).unwrap();
    blockstore.set_roots(std::iter::once(&slot)).unwrap();

    let (transaction_status_sender, transaction_status_receiver) = crossbeam_channel::unbounded();
    let (replay_vote_sender,
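    // (the replay-vote receiver half created next is deliberately unused here,
    // hence the leading underscore on its binding; only the status channel is
    // drained, by the TransactionStatusService constructed below)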
_replay_vote_receiver) = crossbeam_channel::unbounded(); let transaction_status_service = crate::transaction_status_service::TransactionStatusService::new( transaction_status_receiver, max_complete_transaction_status_slot, true, None, blockstore, &Arc::new(AtomicBool::new(false)), ); // Check that process_entries successfully writes can_commit transactions statuses, and // that they are matched properly by get_rooted_block let _result = solana_ledger::blockstore_processor::process_entries_for_tests( &bank, entries, true, Some( &solana_ledger::blockstore_processor::TransactionStatusSender { sender: transaction_status_sender, enable_cpi_and_log_storage: false, }, ), Some(&replay_vote_sender), ); transaction_status_service.join().unwrap(); vec![success_signature, ix_error_signature] } #[cfg(test)] pub mod tests { use { super::{ rpc_accounts::*, rpc_bank::*, rpc_deprecated_v1_9::*, rpc_full::*, rpc_minimal::*, *, }, crate::{ optimistically_confirmed_bank_tracker::{ BankNotification, OptimisticallyConfirmedBankTracker, }, rpc_subscriptions::RpcSubscriptions, }, bincode::deserialize, jsonrpc_core::{futures, ErrorCode, MetaIoHandler, Output, Response, Value}, jsonrpc_core_client::transports::local, solana_client::rpc_filter::{Memcmp, MemcmpEncodedBytes}, solana_gossip::{contact_info::ContactInfo, socketaddr}, solana_ledger::{ blockstore_meta::PerfSample, blockstore_processor::fill_blockstore_slot_with_ticks, genesis_utils::{create_genesis_config, GenesisConfigInfo}, }, solana_runtime::{ accounts_background_service::AbsRequestSender, commitment::BlockCommitment, non_circulating_supply::non_circulating_accounts, }, solana_sdk::{ account::Account, clock::MAX_RECENT_BLOCKHASHES, fee_calculator::DEFAULT_BURN_PERCENT, hash::{hash, Hash}, instruction::InstructionError, message::Message, nonce, rpc_port, signature::{Keypair, Signer}, system_program, system_transaction, timing::slot_duration_from_slots_per_year, transaction::{self, Transaction, TransactionError}, }, solana_transaction_status::{ EncodedConfirmedBlock, EncodedTransaction, EncodedTransactionWithStatusMeta, TransactionDetails, UiMessage, }, solana_vote_program::{ vote_instruction, vote_state::{BlockTimestamp, Vote, VoteInit, VoteStateVersions, MAX_LOCKOUT_HISTORY}, }, spl_token::{ solana_program::{program_option::COption, pubkey::Pubkey as SplTokenPubkey}, state::AccountState as TokenAccountState, state::Mint, }, std::collections::HashMap, }; const TEST_MINT_LAMPORTS: u64 = 1_000_000; const TEST_SLOTS_PER_EPOCH: u64 = DELINQUENT_VALIDATOR_SLOT_DISTANCE + 1; struct RpcHandler { io: MetaIoHandler<JsonRpcRequestProcessor>, meta: JsonRpcRequestProcessor, bank: Arc<Bank>, bank_forks: Arc<RwLock<BankForks>>, blockhash: Hash, alice: Keypair, leader_pubkey: Pubkey, leader_vote_keypair: Arc<Keypair>, block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>, confirmed_block_signatures: Vec<Signature>, } fn start_rpc_handler_with_tx(pubkey: &Pubkey) -> RpcHandler { start_rpc_handler_with_tx_and_blockstore(pubkey, vec![]) } fn start_rpc_handler_with_tx_and_blockstore( pubkey: &Pubkey, blockstore_roots: Vec<Slot>, ) -> RpcHandler { let (bank_forks, alice, leader_vote_keypair) = new_bank_forks(); let bank = bank_forks.read().unwrap().working_bank(); let vote_pubkey = leader_vote_keypair.pubkey(); let mut vote_account = bank.get_account(&vote_pubkey).unwrap_or_default(); let mut vote_state = VoteState::from(&vote_account).unwrap_or_default(); vote_state.last_timestamp = BlockTimestamp { slot: bank.slot(), timestamp: bank.clock().unix_timestamp, }; let 
versioned = VoteStateVersions::new_current(vote_state); VoteState::to(&versioned, &mut vote_account).unwrap(); bank.store_account(&vote_pubkey, &vote_account); let ledger_path = get_tmp_ledger_path!(); let blockstore = Blockstore::open(&ledger_path).unwrap(); let blockstore = Arc::new(blockstore); let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); let keypair3 = Keypair::new(); bank.transfer(4, &alice, &keypair2.pubkey()).unwrap(); let max_complete_transaction_status_slot = Arc::new(AtomicU64::new(blockstore.max_root())); let confirmed_block_signatures = create_test_transactions_and_populate_blockstore( vec![&alice, &keypair1, &keypair2, &keypair3], 0, bank.clone(), blockstore.clone(), max_complete_transaction_status_slot.clone(), ); let mut commitment_slot0 = BlockCommitment::default(); commitment_slot0.increase_confirmation_stake(2, 9); let mut commitment_slot1 = BlockCommitment::default(); commitment_slot1.increase_confirmation_stake(1, 9); let mut block_commitment: HashMap<u64, BlockCommitment> = HashMap::new(); block_commitment.entry(0).or_insert(commitment_slot0); block_commitment.entry(1).or_insert(commitment_slot1); let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::new( block_commitment, 10, CommitmentSlots::new_from_slot(bank.slot()), ))); let mut roots = blockstore_roots; if !roots.is_empty() { roots.retain(|&x| x > 0); let mut parent_bank = bank; for (i, root) in roots.iter().enumerate() { let new_bank = Bank::new_from_parent(&parent_bank, parent_bank.collector_id(), *root); parent_bank = bank_forks.write().unwrap().insert(new_bank); let parent = if i > 0 { roots[i - 1] } else { 0 }; fill_blockstore_slot_with_ticks(&blockstore, 5, *root, parent, Hash::default()); } blockstore.set_roots(roots.iter()).unwrap(); let new_bank = Bank::new_from_parent( &parent_bank, parent_bank.collector_id(), roots.iter().max().unwrap() + 1, ); bank_forks.write().unwrap().insert(new_bank); for root in roots.iter() { bank_forks .write() .unwrap() .set_root(*root, &AbsRequestSender::default(), Some(0)); let mut stakes = HashMap::new(); stakes.insert( leader_vote_keypair.pubkey(), (1, AccountSharedData::default()), ); let block_time = bank_forks .read() .unwrap() .get(*root) .unwrap() .clock() .unix_timestamp; blockstore.cache_block_time(*root, block_time).unwrap(); } } let bank = bank_forks.read().unwrap().working_bank(); let leader_pubkey = *bank.collector_id(); let exit = Arc::new(AtomicBool::new(false)); let validator_exit = create_validator_exit(&exit); let blockhash = bank.confirmed_last_blockhash(); let tx = system_transaction::transfer(&alice, pubkey, 20, blockhash); bank.process_transaction(&tx).expect("process transaction"); let tx = system_transaction::transfer(&alice, &non_circulating_accounts()[0], 20, blockhash); bank.process_transaction(&tx).expect("process transaction"); let tx = system_transaction::transfer(&alice, pubkey, std::u64::MAX, blockhash); let _ = bank.process_transaction(&tx); let cluster_info = Arc::new(ClusterInfo::new( ContactInfo { id: alice.pubkey(), ..ContactInfo::default() }, Arc::new(Keypair::new()), SocketAddrSpace::Unspecified, )); let tpu_address = cluster_info.my_contact_info().tpu; cluster_info.insert_info(ContactInfo::new_with_pubkey_socketaddr( &leader_pubkey, &socketaddr!("127.0.0.1:1234"), )); let sample1 = PerfSample { num_slots: 1, num_transactions: 4, sample_period_secs: 60, }; blockstore .write_perf_sample(0, &sample1) .expect("write to blockstore"); let max_slots = Arc::new(MaxSlots::default()); 
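        // Arbitrary sentinel values: test_rpc_get_max_slots further below expects
        // getMaxRetransmitSlot == 42 and getMaxShredInsertSlot == 43.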
max_slots.retransmit.store(42, Ordering::Relaxed); max_slots.shred_insert.store(43, Ordering::Relaxed); let (meta, receiver) = JsonRpcRequestProcessor::new( JsonRpcConfig { enable_rpc_transaction_history: true, ..JsonRpcConfig::default() }, None, bank_forks.clone(), block_commitment_cache.clone(), blockstore, validator_exit, RpcHealth::stub(), cluster_info.clone(), Hash::default(), None, OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), Arc::new(RwLock::new(LargestAccountsCache::new(30))), max_slots, Arc::new(LeaderScheduleCache::new_from_bank(&bank)), max_complete_transaction_status_slot, ); SendTransactionService::new::<NullTpuInfo>( tpu_address, &bank_forks, None, receiver, 1000, 1, ); cluster_info.insert_info(ContactInfo::new_with_pubkey_socketaddr( &leader_pubkey, &socketaddr!("127.0.0.1:1234"), )); let mut io = MetaIoHandler::default(); io.extend_with(rpc_minimal::MinimalImpl.to_delegate()); io.extend_with(rpc_bank::BankDataImpl.to_delegate()); io.extend_with(rpc_accounts::AccountsDataImpl.to_delegate()); io.extend_with(rpc_full::FullImpl.to_delegate()); io.extend_with(rpc_deprecated_v1_9::DeprecatedV1_9Impl.to_delegate()); RpcHandler { io, meta, bank, bank_forks, blockhash, alice, leader_pubkey, leader_vote_keypair, block_commitment_cache, confirmed_block_signatures, } } #[test] fn test_rpc_request_processor_new() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let genesis = create_genesis_config(100); let bank = Arc::new(Bank::new_for_tests(&genesis.genesis_config)); bank.transfer(20, &genesis.mint_keypair, &bob_pubkey) .unwrap(); let request_processor = JsonRpcRequestProcessor::new_from_bank(&bank, SocketAddrSpace::Unspecified); assert_eq!(request_processor.get_transaction_count(None), 1); } #[test] fn test_rpc_get_balance() { let genesis = create_genesis_config(20); let mint_pubkey = genesis.mint_keypair.pubkey(); let bank = Arc::new(Bank::new_for_tests(&genesis.genesis_config)); let meta = JsonRpcRequestProcessor::new_from_bank(&bank, SocketAddrSpace::Unspecified); let mut io = MetaIoHandler::default(); io.extend_with(rpc_minimal::MinimalImpl.to_delegate()); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getBalance","params":["{}"]}}"#, mint_pubkey ); let res = io.handle_request_sync(&req, meta); let expected = json!({ "jsonrpc": "2.0", "result": { "context":{"slot":0}, "value":20, }, "id": 1, }); let result = serde_json::from_str::<Value>(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); } #[test] fn test_rpc_get_balance_via_client() { let genesis = create_genesis_config(20); let mint_pubkey = genesis.mint_keypair.pubkey(); let bank = Arc::new(Bank::new_for_tests(&genesis.genesis_config)); let meta = JsonRpcRequestProcessor::new_from_bank(&bank, SocketAddrSpace::Unspecified); let mut io = MetaIoHandler::default(); io.extend_with(rpc_minimal::MinimalImpl.to_delegate()); async fn use_client(client: rpc_minimal::gen_client::Client, mint_pubkey: Pubkey) -> u64 { client .get_balance(mint_pubkey.to_string(), None) .await .unwrap() .value } let fut = async { let (client, server) = local::connect_with_metadata::<rpc_minimal::gen_client::Client, _, _>(&io, meta); let client = use_client(client, mint_pubkey); futures::join!(client, server) }; let (response, _) = futures::executor::block_on(fut); assert_eq!(response, 20); } #[test] fn test_rpc_get_cluster_nodes() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, leader_pubkey, .. 
} = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getClusterNodes"}"#; let res = io.handle_request_sync(req, meta); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let expected = format!( r#"{{"jsonrpc":"2.0","result":[{{"pubkey": "{}", "gossip": "127.0.0.1:1235", "shredVersion": 0, "tpu": "127.0.0.1:1234", "rpc": "127.0.0.1:{}", "version": null, "featureSet": null}}],"id":1}}"#, leader_pubkey, rpc_port::DEFAULT_RPC_PORT ); let expected: Response = serde_json::from_str(&expected).expect("expected response deserialization"); assert_eq!(expected, result); } #[test] fn test_rpc_get_recent_performance_samples() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getRecentPerformanceSamples"}"#; let res = io.handle_request_sync(req, meta); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let expected = json!({ "jsonrpc": "2.0", "id": 1, "result": [ { "slot": 0, "numSlots": 1, "numTransactions": 4, "samplePeriodSecs": 60 } ], }); let expected: Response = serde_json::from_value(expected).expect("expected response deserialization"); assert_eq!(expected, result); } #[test] fn test_rpc_get_recent_performance_samples_invalid_limit() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getRecentPerformanceSamples","params":[10000]}"#; let res = io.handle_request_sync(req, meta); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let expected = json!({ "jsonrpc": "2.0", "error": { "code": -32602, "message": "Invalid limit; max 720" }, "id": 1 }); let expected: Response = serde_json::from_value(expected).expect("expected response deserialization"); assert_eq!(expected, result); } #[test] fn test_rpc_get_slot_leader() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, leader_pubkey, .. 
} = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getSlotLeader"}"#; let res = io.handle_request_sync(req, meta); let expected = format!(r#"{{"jsonrpc":"2.0","result":"{}","id":1}}"#, leader_pubkey); let expected: Response = serde_json::from_str(&expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); } #[test] fn test_rpc_get_tx_count() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let genesis = create_genesis_config(10); let bank = Arc::new(Bank::new_for_tests(&genesis.genesis_config)); // Add 4 transactions bank.transfer(1, &genesis.mint_keypair, &bob_pubkey) .unwrap(); bank.transfer(2, &genesis.mint_keypair, &bob_pubkey) .unwrap(); bank.transfer(3, &genesis.mint_keypair, &bob_pubkey) .unwrap(); bank.transfer(4, &genesis.mint_keypair, &bob_pubkey) .unwrap(); let meta = JsonRpcRequestProcessor::new_from_bank(&bank, SocketAddrSpace::Unspecified); let mut io = MetaIoHandler::default(); io.extend_with(rpc_minimal::MinimalImpl.to_delegate()); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getTransactionCount"}"#; let res = io.handle_request_sync(req, meta); let expected = r#"{"jsonrpc":"2.0","result":4,"id":1}"#; let expected: Response = serde_json::from_str(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); } #[test] fn test_rpc_minimum_ledger_slot() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"minimumLedgerSlot"}"#; let res = io.handle_request_sync(req, meta); let expected = r#"{"jsonrpc":"2.0","result":0,"id":1}"#; let expected: Response = serde_json::from_str(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); } #[test] fn test_get_supply() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getSupply"}"#; let res = io.handle_request_sync(req, meta); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let supply: RpcSupply = serde_json::from_value(json["result"]["value"].clone()) .expect("actual response deserialization"); assert_eq!(supply.non_circulating, 20); assert!(supply.circulating >= TEST_MINT_LAMPORTS); assert!(supply.total >= TEST_MINT_LAMPORTS + 20); let expected_accounts: Vec<String> = non_circulating_accounts() .iter() .map(|pubkey| pubkey.to_string()) .collect(); assert_eq!( supply.non_circulating_accounts.len(), expected_accounts.len() ); for address in supply.non_circulating_accounts { assert!(expected_accounts.contains(&address)); } } #[test] fn test_get_supply_exclude_account_list() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, .. 
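        // As in test_get_supply above, the 20 non-circulating lamports come from the
        // transfer to non_circulating_accounts()[0] made in the rpc-handler setup;
        // here we additionally ask the node not to enumerate those accounts.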
} = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getSupply","params":[{"excludeNonCirculatingAccountsList":true}]}"#; let res = io.handle_request_sync(req, meta); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let supply: RpcSupply = serde_json::from_value(json["result"]["value"].clone()) .expect("actual response deserialization"); assert_eq!(supply.non_circulating, 20); assert!(supply.circulating >= TEST_MINT_LAMPORTS); assert!(supply.total >= TEST_MINT_LAMPORTS + 20); assert!(supply.non_circulating_accounts.is_empty()); } #[test] fn test_get_largest_accounts() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, alice, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getLargestAccounts"}"#; let res = io.handle_request_sync(req, meta.clone()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let largest_accounts: Vec<RpcAccountBalance> = serde_json::from_value(json["result"]["value"].clone()) .expect("actual response deserialization"); assert_eq!(largest_accounts.len(), 20); // Get Alice balance let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getBalance","params":["{}"]}}"#, alice.pubkey() ); let res = io.handle_request_sync(&req, meta.clone()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let alice_balance: u64 = serde_json::from_value(json["result"]["value"].clone()) .expect("actual response deserialization"); assert!(largest_accounts.contains(&RpcAccountBalance { address: alice.pubkey().to_string(), lamports: alice_balance, })); // Get Bob balance let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getBalance","params":["{}"]}}"#, bob_pubkey ); let res = io.handle_request_sync(&req, meta.clone()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let bob_balance: u64 = serde_json::from_value(json["result"]["value"].clone()) .expect("actual response deserialization"); assert!(largest_accounts.contains(&RpcAccountBalance { address: bob_pubkey.to_string(), lamports: bob_balance, })); // Test Circulating/NonCirculating Filter let req = r#"{"jsonrpc":"2.0","id":1,"method":"getLargestAccounts","params":[{"filter":"circulating"}]}"#; let res = io.handle_request_sync(req, meta.clone()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let largest_accounts: Vec<RpcAccountBalance> = serde_json::from_value(json["result"]["value"].clone()) .expect("actual response deserialization"); assert_eq!(largest_accounts.len(), 20); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getLargestAccounts","params":[{"filter":"nonCirculating"}]}"#; let res = io.handle_request_sync(req, meta); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let largest_accounts: Vec<RpcAccountBalance> = serde_json::from_value(json["result"]["value"].clone()) .expect("actual response deserialization"); assert_eq!(largest_accounts.len(), 1); } #[test] fn test_rpc_get_minimum_balance_for_rent_exemption() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let data_len = 50; let RpcHandler { io, meta, bank, .. 
} = start_rpc_handler_with_tx(&bob_pubkey); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getMinimumBalanceForRentExemption","params":[{}]}}"#, data_len ); let rep = io.handle_request_sync(&req, meta); let res: Response = serde_json::from_str(&rep.expect("actual response")) .expect("actual response deserialization"); let minimum_balance: u64 = if let Response::Single(res) = res { if let Output::Success(res) = res { if let Value::Number(num) = res.result { num.as_u64().unwrap() } else { panic!("Expected number"); } } else { panic!("Expected success"); } } else { panic!("Expected single response"); }; assert_eq!( minimum_balance, bank.get_minimum_balance_for_rent_exemption(data_len) ); } #[test] fn test_rpc_get_inflation() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getInflationGovernor"}"#; let rep = io.handle_request_sync(req, meta.clone()); let res: Response = serde_json::from_str(&rep.expect("actual response")) .expect("actual response deserialization"); let inflation_governor: RpcInflationGovernor = if let Response::Single(res) = res { if let Output::Success(res) = res { serde_json::from_value(res.result).unwrap() } else { panic!("Expected success"); } } else { panic!("Expected single response"); }; let expected_inflation_governor: RpcInflationGovernor = bank.inflation().into(); assert_eq!(inflation_governor, expected_inflation_governor); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getInflationRate"}"#; // Queries current epoch let rep = io.handle_request_sync(req, meta); let res: Response = serde_json::from_str(&rep.expect("actual response")) .expect("actual response deserialization"); let inflation_rate: RpcInflationRate = if let Response::Single(res) = res { if let Output::Success(res) = res { serde_json::from_value(res.result).unwrap() } else { panic!("Expected success"); } } else { panic!("Expected single response"); }; let inflation = bank.inflation(); let epoch = bank.epoch(); let slot_in_year = bank.slot_in_year_for_inflation(); let expected_inflation_rate = RpcInflationRate { total: inflation.total(slot_in_year), validator: inflation.validator(slot_in_year), foundation: inflation.foundation(slot_in_year), epoch, }; assert_eq!(inflation_rate, expected_inflation_rate); } #[test] fn test_rpc_get_epoch_schedule() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getEpochSchedule"}"#; let rep = io.handle_request_sync(req, meta); let res: Response = serde_json::from_str(&rep.expect("actual response")) .expect("actual response deserialization"); let epoch_schedule: EpochSchedule = if let Response::Single(res) = res { if let Output::Success(res) = res { serde_json::from_value(res.result).unwrap() } else { panic!("Expected success"); } } else { panic!("Expected single response"); }; assert_eq!(epoch_schedule, *bank.epoch_schedule()); } #[test] fn test_rpc_get_leader_schedule() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, bank, .. 
} = start_rpc_handler_with_tx(&bob_pubkey); for req in [ r#"{"jsonrpc":"2.0","id":1,"method":"getLeaderSchedule", "params": [0]}"#, r#"{"jsonrpc":"2.0","id":1,"method":"getLeaderSchedule"}"#, &format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getLeaderSchedule", "params": [null, {{ "identity": "{}" }}]}}"#, bank.collector_id() ), &format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getLeaderSchedule", "params": [{{ "identity": "{}" }}]}}"#, bank.collector_id() ), ] .iter() { let rep = io.handle_request_sync(req, meta.clone()); let res: Response = serde_json::from_str(&rep.expect("actual response")) .expect("actual response deserialization"); let schedule: Option<RpcLeaderSchedule> = if let Response::Single(res) = res { if let Output::Success(res) = res { serde_json::from_value(res.result).unwrap() } else { panic!("Expected success for {}", req); } } else { panic!("Expected single response"); }; let schedule = schedule.expect("leader schedule"); let bob_schedule = schedule .get(&bank.collector_id().to_string()) .expect("leader not in the leader schedule"); assert_eq!( bob_schedule.len(), solana_ledger::leader_schedule_utils::leader_schedule(bank.epoch(), &bank) .unwrap() .get_slot_leaders() .len() ); } let req = r#"{"jsonrpc":"2.0","id":1,"method":"getLeaderSchedule", "params": [42424242]}"#; let rep = io.handle_request_sync(req, meta.clone()); let res: Response = serde_json::from_str(&rep.expect("actual response")) .expect("actual response deserialization"); let schedule: Option<RpcLeaderSchedule> = if let Response::Single(res) = res { if let Output::Success(res) = res { serde_json::from_value(res.result).unwrap() } else { panic!("Expected success"); } } else { panic!("Expected single response"); }; assert_eq!(schedule, None); // `bob` is not in the leader schedule, look for an empty response let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getLeaderSchedule", "params": [{{ "identity": "{}"}}]}}"#, bob_pubkey ); let rep = io.handle_request_sync(&req, meta); let res: Response = serde_json::from_str(&rep.expect("actual response")) .expect("actual response deserialization"); let schedule: Option<RpcLeaderSchedule> = if let Response::Single(res) = res { if let Output::Success(res) = res { serde_json::from_value(res.result).unwrap() } else { panic!("Expected success"); } } else { panic!("Expected single response"); }; assert_eq!(schedule, Some(HashMap::default())); } #[test] fn test_rpc_get_slot_leaders() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, bank, .. 
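        // The requests below exercise getSlotLeaders across an epoch boundary and its
        // input validation: a limit of 5001 is rejected (the cap is evidently 5000),
        // as is a start slot beyond the epochs known to the leader schedule cache.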
} = start_rpc_handler_with_tx(&bob_pubkey); // Test that slot leaders will be returned across epochs let query_start = 0; let query_limit = 2 * bank.epoch_schedule().slots_per_epoch; let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getSlotLeaders", "params": [{}, {}]}}"#, query_start, query_limit ); let rep = io.handle_request_sync(&req, meta.clone()); let res: Response = serde_json::from_str(&rep.expect("actual response")) .expect("actual response deserialization"); let slot_leaders: Vec<String> = if let Response::Single(res) = res { if let Output::Success(res) = res { serde_json::from_value(res.result).unwrap() } else { panic!("Expected success for {} but received: {:?}", req, res); } } else { panic!("Expected single response"); }; assert_eq!(slot_leaders.len(), query_limit as usize); // Test that invalid limit returns an error let query_start = 0; let query_limit = 5001; let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getSlotLeaders", "params": [{}, {}]}}"#, query_start, query_limit ); let rep = io.handle_request_sync(&req, meta.clone()); let res: Value = serde_json::from_str(&rep.expect("actual response")) .expect("actual response deserialization"); assert!(res.get("error").is_some()); // Test that invalid epoch returns an error let query_start = 2 * bank.epoch_schedule().slots_per_epoch; let query_limit = 10; let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getSlotLeaders", "params": [{}, {}]}}"#, query_start, query_limit ); let rep = io.handle_request_sync(&req, meta); let res: Value = serde_json::from_str(&rep.expect("actual response")) .expect("actual response deserialization"); assert!(res.get("error").is_some()); } #[test] fn test_rpc_get_account_info() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, bank, .. 
} = start_rpc_handler_with_tx(&bob_pubkey); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getAccountInfo","params":["{}"]}}"#, bob_pubkey ); let res = io.handle_request_sync(&req, meta.clone()); let expected = json!({ "jsonrpc": "2.0", "result": { "context":{"slot":0}, "value":{ "owner": "11111111111111111111111111111111", "lamports": 20, "data": "", "executable": false, "rentEpoch": 0 }, }, "id": 1, }); let expected: Response = serde_json::from_value(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); let address = solana_sdk::pubkey::new_rand(); let data = vec![1, 2, 3, 4, 5]; let mut account = AccountSharedData::new(42, 5, &Pubkey::default()); account.set_data(data.clone()); bank.store_account(&address, &account); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getAccountInfo","params":["{}", {{"encoding":"base64"}}]}}"#, address ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!( result["result"]["value"]["data"], json!([base64::encode(&data), "base64"]), ); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getAccountInfo","params":["{}", {{"encoding":"base64", "dataSlice": {{"length": 2, "offset": 1}}}}]}}"#, address ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!( result["result"]["value"]["data"], json!([base64::encode(&data[1..3]), "base64"]), ); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getAccountInfo","params":["{}", {{"encoding":"binary", "dataSlice": {{"length": 2, "offset": 1}}}}]}}"#, address ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!( result["result"]["value"]["data"], bs58::encode(&data[1..3]).into_string(), ); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getAccountInfo","params":["{}", {{"encoding":"jsonParsed", "dataSlice": {{"length": 2, "offset": 1}}}}]}}"#, address ); let res = io.handle_request_sync(&req, meta); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); result["error"].as_object().unwrap(); } #[test] fn test_rpc_get_multiple_accounts() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, bank, .. 
} = start_rpc_handler_with_tx(&bob_pubkey); let address = Pubkey::new(&[9; 32]); let data = vec![1, 2, 3, 4, 5]; let mut account = AccountSharedData::new(42, 5, &Pubkey::default()); account.set_data(data.clone()); bank.store_account(&address, &account); let non_existent_address = Pubkey::new(&[8; 32]); // Test 3 accounts, one non-existent, and one with data let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getMultipleAccounts","params":[["{}", "{}", "{}"]]}}"#, bob_pubkey, non_existent_address, address, ); let res = io.handle_request_sync(&req, meta.clone()); let expected = json!({ "jsonrpc": "2.0", "result": { "context":{"slot":0}, "value":[{ "owner": "11111111111111111111111111111111", "lamports": 20, "data": ["", "base64"], "executable": false, "rentEpoch": 0 }, null, { "owner": "11111111111111111111111111111111", "lamports": 42, "data": [base64::encode(&data), "base64"], "executable": false, "rentEpoch": 0 }], }, "id": 1, }); let expected: Response = serde_json::from_value(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); // Test config settings still work with multiple accounts let req = format!( r#"{{ "jsonrpc":"2.0","id":1,"method":"getMultipleAccounts","params":[ ["{}", "{}", "{}"], {{"encoding":"base58"}} ] }}"#, bob_pubkey, non_existent_address, address, ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(result["result"]["value"].as_array().unwrap().len(), 3); assert_eq!( result["result"]["value"][2]["data"], json!([bs58::encode(&data).into_string(), "base58"]), ); let req = format!( r#"{{ "jsonrpc":"2.0","id":1,"method":"getMultipleAccounts","params":[ ["{}", "{}", "{}"], {{"encoding":"base64", "dataSlice": {{"length": 2, "offset": 1}}}} ] }}"#, bob_pubkey, non_existent_address, address, ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(result["result"]["value"].as_array().unwrap().len(), 3); assert_eq!( result["result"]["value"][2]["data"], json!([base64::encode(&data[1..3]), "base64"]), ); let req = format!( r#"{{ "jsonrpc":"2.0","id":1,"method":"getMultipleAccounts","params":[ ["{}", "{}", "{}"], {{"encoding":"binary", "dataSlice": {{"length": 2, "offset": 1}}}} ] }}"#, bob_pubkey, non_existent_address, address, ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(result["result"]["value"].as_array().unwrap().len(), 3); assert_eq!( result["result"]["value"][2]["data"], bs58::encode(&data[1..3]).into_string(), ); let req = format!( r#"{{ "jsonrpc":"2.0","id":1,"method":"getMultipleAccounts","params":[ ["{}", "{}", "{}"], {{"encoding":"jsonParsed", "dataSlice": {{"length": 2, "offset": 1}}}} ] }}"#, bob_pubkey, non_existent_address, address, ); let res = io.handle_request_sync(&req, meta); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); result["error"].as_object().unwrap(); } #[test] fn test_rpc_get_program_accounts() { let bob = Keypair::new(); let RpcHandler { io, meta, bank, blockhash, alice, .. 
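        // Note on the memcmp filters used below (based on the bincode layout of system
        // nonce accounts): the data starts with a 4-byte versions tag, so offset 4
        // lands on the state discriminant, whose leading byte is 1 for Initialized,
        // and offset 8 reaches the nonce authority pubkey.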
} = start_rpc_handler_with_tx(&bob.pubkey()); let new_program_id = solana_sdk::pubkey::new_rand(); let tx = system_transaction::assign(&bob, blockhash, &new_program_id); bank.process_transaction(&tx).unwrap(); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getProgramAccounts","params":["{}"]}}"#, new_program_id ); let res = io.handle_request_sync(&req, meta.clone()); let expected = format!( r#"{{ "jsonrpc":"2.0", "result":[ {{ "pubkey": "{}", "account": {{ "owner": "{}", "lamports": 20, "data": "", "executable": false, "rentEpoch": 0 }} }} ], "id":1}} "#, bob.pubkey(), new_program_id ); let expected: Response = serde_json::from_str(&expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); // Test returns context let req = format!( r#"{{ "jsonrpc":"2.0", "id":1, "method":"getProgramAccounts", "params":["{}",{{ "withContext": true }}] }}"#, system_program::id(), ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")).unwrap(); let contains_slot = result["result"]["context"] .as_object() .expect("must contain context") .contains_key("slot"); assert!(contains_slot); // Set up nonce accounts to test filters let nonce_keypair0 = Keypair::new(); let instruction = system_instruction::create_nonce_account( &alice.pubkey(), &nonce_keypair0.pubkey(), &bob.pubkey(), 100_000, ); let message = Message::new(&instruction, Some(&alice.pubkey())); let tx = Transaction::new(&[&alice, &nonce_keypair0], message, blockhash); bank.process_transaction(&tx).unwrap(); let nonce_keypair1 = Keypair::new(); let authority = solana_sdk::pubkey::new_rand(); let instruction = system_instruction::create_nonce_account( &alice.pubkey(), &nonce_keypair1.pubkey(), &authority, 100_000, ); let message = Message::new(&instruction, Some(&alice.pubkey())); let tx = Transaction::new(&[&alice, &nonce_keypair1], message, blockhash); bank.process_transaction(&tx).unwrap(); // Test memcmp filter; filter on Initialized state let req = format!( r#"{{ "jsonrpc":"2.0", "id":1, "method":"getProgramAccounts", "params":["{}",{{"filters": [ {{ "memcmp": {{"offset": 4,"bytes": "{}"}} }} ]}}] }}"#, system_program::id(), bs58::encode(vec![1]).into_string(), ); let res = io.handle_request_sync(&req, meta.clone()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let accounts: Vec<RpcKeyedAccount> = serde_json::from_value(json["result"].clone()) .expect("actual response deserialization"); assert_eq!(accounts.len(), 2); let req = format!( r#"{{ "jsonrpc":"2.0", "id":1, "method":"getProgramAccounts", "params":["{}",{{"filters": [ {{ "memcmp": {{"offset": 0,"bytes": "{}"}} }} ]}}] }}"#, system_program::id(), bs58::encode(vec![1]).into_string(), ); let res = io.handle_request_sync(&req, meta.clone()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let accounts: Vec<RpcKeyedAccount> = serde_json::from_value(json["result"].clone()) .expect("actual response deserialization"); assert_eq!(accounts.len(), 0); // Test dataSize filter let req = format!( r#"{{ "jsonrpc":"2.0", "id":1, "method":"getProgramAccounts", "params":["{}",{{"filters": [ {{ "dataSize": {} }} ]}}] }}"#, system_program::id(), nonce::State::size(), ); let res = io.handle_request_sync(&req, meta.clone()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let accounts: Vec<RpcKeyedAccount> = 
serde_json::from_value(json["result"].clone()) .expect("actual response deserialization"); assert_eq!(accounts.len(), 2); let req = format!( r#"{{ "jsonrpc":"2.0", "id":1, "method":"getProgramAccounts", "params":["{}",{{"filters": [ {{ "dataSize": 1 }} ]}}] }}"#, system_program::id(), ); let res = io.handle_request_sync(&req, meta.clone()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let accounts: Vec<RpcKeyedAccount> = serde_json::from_value(json["result"].clone()) .expect("actual response deserialization"); assert_eq!(accounts.len(), 0); // Test multiple filters let req = format!( r#"{{ "jsonrpc":"2.0", "id":1, "method":"getProgramAccounts", "params":["{}",{{"filters": [ {{ "memcmp": {{"offset": 4,"bytes": "{}"}} }}, {{ "memcmp": {{"offset": 8,"bytes": "{}"}} }} ]}}] }}"#, system_program::id(), bs58::encode(vec![1]).into_string(), authority, ); // Filter on Initialized and Nonce authority let res = io.handle_request_sync(&req, meta.clone()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let accounts: Vec<RpcKeyedAccount> = serde_json::from_value(json["result"].clone()) .expect("actual response deserialization"); assert_eq!(accounts.len(), 1); let req = format!( r#"{{ "jsonrpc":"2.0", "id":1, "method":"getProgramAccounts", "params":["{}",{{"filters": [ {{ "memcmp": {{"offset": 4,"bytes": "{}"}} }}, {{ "dataSize": 1 }} ]}}] }}"#, system_program::id(), bs58::encode(vec![1]).into_string(), ); // Filter on Initialized and non-matching data size let res = io.handle_request_sync(&req, meta); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let accounts: Vec<RpcKeyedAccount> = serde_json::from_value(json["result"].clone()) .expect("actual response deserialization"); assert_eq!(accounts.len(), 0); } #[test] fn test_rpc_simulate_transaction() { let RpcHandler { io, meta, blockhash, alice, bank, .. } = start_rpc_handler_with_tx(&solana_sdk::pubkey::new_rand()); let bob_pubkey = solana_sdk::pubkey::new_rand(); let mut tx = system_transaction::transfer(&alice, &bob_pubkey, 1234, blockhash); let tx_serialized_encoded = bs58::encode(serialize(&tx).unwrap()).into_string(); tx.signatures[0] = Signature::default(); let tx_badsig_serialized_encoded = bs58::encode(serialize(&tx).unwrap()).into_string(); tx.message.recent_blockhash = Hash::default(); let tx_invalid_recent_blockhash = bs58::encode(serialize(&tx).unwrap()).into_string(); bank.freeze(); // Ensure the root bank is frozen, `start_rpc_handler_with_tx()` doesn't do this // Good signature with sigVerify=true let req = format!( r#"{{"jsonrpc":"2.0", "id":1, "method":"simulateTransaction", "params":[ "{}", {{ "sigVerify": true, "accounts": {{ "encoding": "jsonParsed", "addresses": ["{}", "{}"] }} }} ] }}"#, tx_serialized_encoded, solana_sdk::pubkey::new_rand(), bob_pubkey, ); let res = io.handle_request_sync(&req, meta.clone()); let expected = json!({ "jsonrpc": "2.0", "result": { "context":{"slot":0}, "value":{ "accounts": [ null, { "data": ["", "base64"], "executable": false, "owner": "11111111111111111111111111111111", "lamports": 1234, "rentEpoch": 0 } ], "err":null, "logs":[ "Program 11111111111111111111111111111111 invoke [1]", "Program 11111111111111111111111111111111 success" ], "unitsConsumed":0 } }, "id": 1, }); let expected: Response = serde_json::from_value(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); // Too many input accounts... 
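            // (simulateTransaction apparently refuses to report more post-simulation
            // accounts than the transaction itself references; the transfer has three
            // account keys, so the four addresses below draw "max 3")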
let req = format!( r#"{{"jsonrpc":"2.0", "id":1, "method":"simulateTransaction", "params":[ "{}", {{ "sigVerify": true, "accounts": {{ "addresses": [ "11111111111111111111111111111111", "11111111111111111111111111111111", "11111111111111111111111111111111", "11111111111111111111111111111111" ] }} }} ] }}"#, tx_serialized_encoded, ); let res = io.handle_request_sync(&req, meta.clone()); let expected = json!({ "jsonrpc":"2.0", "error": { "code": error::ErrorCode::InvalidParams.code(), "message": "Too many accounts provided; max 3" }, "id":1 }); let expected: Response = serde_json::from_value(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); // Bad signature with sigVerify=true let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{}", {{"sigVerify": true}}]}}"#, tx_badsig_serialized_encoded, ); let res = io.handle_request_sync(&req, meta.clone()); let expected = json!({ "jsonrpc":"2.0", "error": { "code": -32003, "message": "Transaction signature verification failure" }, "id":1 }); let expected: Response = serde_json::from_value(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); // Bad signature with sigVerify=false let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{}", {{"sigVerify": false}}]}}"#, tx_serialized_encoded, ); let res = io.handle_request_sync(&req, meta.clone()); let expected = json!({ "jsonrpc": "2.0", "result": { "context":{"slot":0}, "value":{ "accounts":null, "err":null, "logs":[ "Program 11111111111111111111111111111111 invoke [1]", "Program 11111111111111111111111111111111 success" ], "unitsConsumed":0 } }, "id": 1, }); let expected: Response = serde_json::from_value(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); // Bad signature with default sigVerify setting (false) let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{}"]}}"#, tx_serialized_encoded, ); let res = io.handle_request_sync(&req, meta.clone()); let expected = json!({ "jsonrpc": "2.0", "result": { "context":{"slot":0}, "value":{ "accounts":null, "err":null, "logs":[ "Program 11111111111111111111111111111111 invoke [1]", "Program 11111111111111111111111111111111 success" ], "unitsConsumed":0 } }, "id": 1, }); let expected: Response = serde_json::from_value(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); // Enabled both sigVerify=true and replaceRecentBlockhash=true let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{}", {}]}}"#, tx_serialized_encoded, json!({ "sigVerify": true, "replaceRecentBlockhash": true, }) ); let res = io.handle_request_sync(&req, meta.clone()); let expected = json!({ "jsonrpc":"2.0", "error": { "code": ErrorCode::InvalidParams, "message": "sigVerify may not be used with replaceRecentBlockhash" }, "id":1 }); let expected: Response = serde_json::from_value(expected).expect("expected response deserialization"); let result: Response 
= serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); // Bad recent blockhash with replaceRecentBlockhash=false let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{}", {{"replaceRecentBlockhash": false}}]}}"#, tx_invalid_recent_blockhash, ); let res = io.handle_request_sync(&req, meta.clone()); let expected = json!({ "jsonrpc":"2.0", "result": { "context":{"slot":0}, "value":{ "err":"BlockhashNotFound", "accounts":null, "logs":[], "unitsConsumed":0 } }, "id":1 }); let expected: Response = serde_json::from_value(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); // Bad recent blockhash with replaceRecentBlockhash=true let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{}", {{"replaceRecentBlockhash": true}}]}}"#, tx_invalid_recent_blockhash, ); let res = io.handle_request_sync(&req, meta); let expected = json!({ "jsonrpc": "2.0", "result": { "context":{"slot":0}, "value":{ "accounts":null, "err":null, "logs":[ "Program 11111111111111111111111111111111 invoke [1]", "Program 11111111111111111111111111111111 success" ], "unitsConsumed":0 } }, "id": 1, }); let expected: Response = serde_json::from_value(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); } #[test] #[should_panic] fn test_rpc_simulate_transaction_panic_on_unfrozen_bank() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, blockhash, alice, bank, .. } = start_rpc_handler_with_tx(&bob_pubkey); let tx = system_transaction::transfer(&alice, &bob_pubkey, 1234, blockhash); let tx_serialized_encoded = bs58::encode(serialize(&tx).unwrap()).into_string(); assert!(!bank.is_frozen()); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{}", {{"sigVerify": true}}]}}"#, tx_serialized_encoded, ); // should panic because `bank` is not frozen let _ = io.handle_request_sync(&req, meta); } #[test] fn test_rpc_get_signature_statuses() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, mut meta, blockhash, alice, confirmed_block_signatures, .. 
} = start_rpc_handler_with_tx(&bob_pubkey); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getSignatureStatuses","params":[["{}"]]}}"#, confirmed_block_signatures[0] ); let res = io.handle_request_sync(&req, meta.clone()); let expected_res: transaction::Result<()> = Ok(()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let result: Option<TransactionStatus> = serde_json::from_value(json["result"]["value"][0].clone()) .expect("actual response deserialization"); let result = result.as_ref().unwrap(); assert_eq!(expected_res, result.status); assert_eq!(None, result.confirmations); // Test getSignatureStatus request on unprocessed tx let tx = system_transaction::transfer(&alice, &bob_pubkey, 10, blockhash); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getSignatureStatuses","params":[["{}"]]}}"#, tx.signatures[0] ); let res = io.handle_request_sync(&req, meta.clone()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let result: Option<TransactionStatus> = serde_json::from_value(json["result"]["value"][0].clone()) .expect("actual response deserialization"); assert!(result.is_none()); // Test getSignatureStatus request on a TransactionError let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getSignatureStatuses","params":[["{}"]]}}"#, confirmed_block_signatures[1] ); let res = io.handle_request_sync(&req, meta.clone()); let expected_res: transaction::Result<()> = Err(TransactionError::InstructionError( 0, InstructionError::Custom(1), )); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let result: Option<TransactionStatus> = serde_json::from_value(json["result"]["value"][0].clone()) .expect("actual response deserialization"); assert_eq!(expected_res, result.as_ref().unwrap().status); // disable rpc-tx-history, but attempt historical query meta.config.enable_rpc_transaction_history = false; let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getSignatureStatuses","params":[["{}"], {{"searchTransactionHistory": true}}]}}"#, confirmed_block_signatures[1] ); let res = io.handle_request_sync(&req, meta); assert_eq!( res, Some( r#"{"jsonrpc":"2.0","error":{"code":-32011,"message":"Transaction history is not available from this node"},"id":1}"#.to_string(), ) ); } #[test] fn test_rpc_get_recent_blockhash() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, blockhash, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getRecentBlockhash"}"#; let res = io.handle_request_sync(req, meta); let expected = json!({ "jsonrpc": "2.0", "result": { "context":{"slot":0}, "value":{ "blockhash": blockhash.to_string(), "feeCalculator": { "lamportsPerSignature": 0, } }}, "id": 1 }); let expected: Response = serde_json::from_value(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); } #[test] fn test_rpc_get_fees() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, blockhash, .. 
} = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getFees"}"#; let res = io.handle_request_sync(req, meta); let expected = json!({ "jsonrpc": "2.0", "result": { "context":{"slot":0}, "value":{ "blockhash": blockhash.to_string(), "feeCalculator": { "lamportsPerSignature": 0, }, "lastValidSlot": MAX_RECENT_BLOCKHASHES, "lastValidBlockHeight": MAX_RECENT_BLOCKHASHES, }}, "id": 1 }); let expected: Response = serde_json::from_value(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); } #[test] fn test_rpc_get_fee_calculator_for_blockhash() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx(&bob_pubkey); let blockhash = bank.last_blockhash(); let lamports_per_signature = bank.get_lamports_per_signature(); let fee_calculator = RpcFeeCalculator { fee_calculator: FeeCalculator::new(lamports_per_signature), }; let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getFeeCalculatorForBlockhash","params":["{:?}"]}}"#, blockhash ); let res = io.handle_request_sync(&req, meta.clone()); let expected = json!({ "jsonrpc": "2.0", "result": { "context":{"slot":0}, "value":fee_calculator, }, "id": 1 }); let expected: Response = serde_json::from_value(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); // Expired (non-existent) blockhash let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getFeeCalculatorForBlockhash","params":["{:?}"]}}"#, Hash::default() ); let res = io.handle_request_sync(&req, meta); let expected = json!({ "jsonrpc": "2.0", "result": { "context":{"slot":0}, "value":Value::Null, }, "id": 1 }); let expected: Response = serde_json::from_value(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); } #[test] fn test_rpc_get_fee_rate_governor() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getFeeRateGovernor"}"#; let res = io.handle_request_sync(req, meta); let expected = json!({ "jsonrpc": "2.0", "result": { "context":{"slot":0}, "value":{ "feeRateGovernor": { "burnPercent": DEFAULT_BURN_PERCENT, "maxLamportsPerSignature": 0, "minLamportsPerSignature": 0, "targetLamportsPerSignature": 0, "targetSignaturesPerSlot": 0 } }}, "id": 1 }); let expected: Response = serde_json::from_value(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); } #[test] fn test_rpc_fail_request_airdrop() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, .. 
} = start_rpc_handler_with_tx(&bob_pubkey); // Expect internal error because no faucet is available let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"requestAirdrop","params":["{}", 50]}}"#, bob_pubkey ); let res = io.handle_request_sync(&req, meta); let expected = r#"{"jsonrpc":"2.0","error":{"code":-32600,"message":"Invalid request"},"id":1}"#; let expected: Response = serde_json::from_str(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); } #[test] fn test_rpc_send_bad_tx() { let genesis = create_genesis_config(100); let bank = Arc::new(Bank::new_for_tests(&genesis.genesis_config)); let meta = JsonRpcRequestProcessor::new_from_bank(&bank, SocketAddrSpace::Unspecified); let mut io = MetaIoHandler::default(); io.extend_with(rpc_full::FullImpl.to_delegate()); let req = r#"{"jsonrpc":"2.0","id":1,"method":"sendTransaction","params":["37u9WtQpcm6ULa3Vmu7ySnANv"]}"#; let res = io.handle_request_sync(req, meta); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let error = &json["error"]; assert_eq!(error["code"], ErrorCode::InvalidParams.code()); } #[test] fn test_rpc_send_transaction_preflight() { let exit = Arc::new(AtomicBool::new(false)); let validator_exit = create_validator_exit(&exit); let ledger_path = get_tmp_ledger_path!(); let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default())); let (bank_forks, mint_keypair, ..) = new_bank_forks(); let health = RpcHealth::stub(); // Freeze bank 0 to prevent a panic in `run_transaction_simulation()` bank_forks.write().unwrap().get(0).unwrap().freeze(); let mut io = MetaIoHandler::default(); io.extend_with(rpc_full::FullImpl.to_delegate()); let cluster_info = Arc::new(ClusterInfo::new( ContactInfo::new_with_socketaddr(&socketaddr!("127.0.0.1:1234")), Arc::new(Keypair::new()), SocketAddrSpace::Unspecified, )); let tpu_address = cluster_info.my_contact_info().tpu; let (meta, receiver) = JsonRpcRequestProcessor::new( JsonRpcConfig::default(), None, bank_forks.clone(), block_commitment_cache, blockstore, validator_exit, health.clone(), cluster_info, Hash::default(), None, OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), Arc::new(RwLock::new(LargestAccountsCache::new(30))), Arc::new(MaxSlots::default()), Arc::new(LeaderScheduleCache::default()), Arc::new(AtomicU64::default()), ); SendTransactionService::new::<NullTpuInfo>( tpu_address, &bank_forks, None, receiver, 1000, 1, ); let mut bad_transaction = system_transaction::transfer( &mint_keypair, &solana_sdk::pubkey::new_rand(), 42, Hash::default(), ); // sendTransaction will fail because the blockhash is invalid let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"sendTransaction","params":["{}"]}}"#, bs58::encode(serialize(&bad_transaction).unwrap()).into_string() ); let res = io.handle_request_sync(&req, meta.clone()); assert_eq!( res, Some( r#"{"jsonrpc":"2.0","error":{"code":-32002,"message":"Transaction simulation failed: Blockhash not found","data":{"accounts":null,"err":"BlockhashNotFound","logs":[],"unitsConsumed":0}},"id":1}"#.to_string(), ) ); // sendTransaction will fail due to insanity bad_transaction.message.instructions[0].program_id_index = 0u8; let recent_blockhash = bank_forks.read().unwrap().root_bank().last_blockhash(); bad_transaction.sign(&[&mint_keypair], recent_blockhash); let req = 
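        // The re-signed transaction now carries a valid blockhash but a corrupted
        // program id index, so preflight fails in sanitization rather than in the
        // blockhash or signature checks exercised elsewhere in this test.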
format!( r#"{{"jsonrpc":"2.0","id":1,"method":"sendTransaction","params":["{}"]}}"#, bs58::encode(serialize(&bad_transaction).unwrap()).into_string() ); let res = io.handle_request_sync(&req, meta.clone()); assert_eq!( res, Some( r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"invalid transaction: Transaction failed to sanitize accounts offsets correctly"},"id":1}"#.to_string(), ) ); let mut bad_transaction = system_transaction::transfer( &mint_keypair, &solana_sdk::pubkey::new_rand(), 42, recent_blockhash, ); // sendTransaction will fail due to poor node health health.stub_set_health_status(Some(RpcHealthStatus::Behind { num_slots: 42 })); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"sendTransaction","params":["{}"]}}"#, bs58::encode(serialize(&bad_transaction).unwrap()).into_string() ); let res = io.handle_request_sync(&req, meta.clone()); assert_eq!( res, Some( r#"{"jsonrpc":"2.0","error":{"code":-32005,"message":"Node is behind by 42 slots","data":{"numSlotsBehind":42}},"id":1}"#.to_string(), ) ); health.stub_set_health_status(None); // sendTransaction will fail due to invalid signature bad_transaction.signatures[0] = Signature::default(); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"sendTransaction","params":["{}"]}}"#, bs58::encode(serialize(&bad_transaction).unwrap()).into_string() ); let res = io.handle_request_sync(&req, meta.clone()); assert_eq!( res, Some( r#"{"jsonrpc":"2.0","error":{"code":-32003,"message":"Transaction signature verification failure"},"id":1}"#.to_string(), ) ); // sendTransaction will now succeed because skipPreflight=true even though it's a bad // transaction let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"sendTransaction","params":["{}", {{"skipPreflight": true}}]}}"#, bs58::encode(serialize(&bad_transaction).unwrap()).into_string() ); let res = io.handle_request_sync(&req, meta.clone()); assert_eq!( res, Some( r#"{"jsonrpc":"2.0","result":"1111111111111111111111111111111111111111111111111111111111111111","id":1}"#.to_string(), ) ); // sendTransaction will fail due to sanitization failure bad_transaction.signatures.clear(); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"sendTransaction","params":["{}"]}}"#, bs58::encode(serialize(&bad_transaction).unwrap()).into_string() ); let res = io.handle_request_sync(&req, meta); assert_eq!( res, Some( r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"invalid transaction: Transaction failed to sanitize accounts offsets correctly"},"id":1}"#.to_string(), ) ); } #[test] fn test_rpc_verify_filter() { let filter = RpcFilterType::Memcmp(Memcmp { offset: 0, bytes: MemcmpEncodedBytes::Base58( "13LeFbG6m2EP1fqCj9k66fcXsoTHMMtgr7c78AivUrYD".to_string(), ), encoding: None, }); assert_eq!(verify_filter(&filter), Ok(())); // Invalid base-58 let filter = RpcFilterType::Memcmp(Memcmp { offset: 0, bytes: MemcmpEncodedBytes::Base58("III".to_string()), encoding: None, }); assert!(verify_filter(&filter).is_err()); } #[test] fn test_rpc_verify_pubkey() { let pubkey = solana_sdk::pubkey::new_rand(); assert_eq!(verify_pubkey(&pubkey.to_string()).unwrap(), pubkey); let bad_pubkey = "a1b2c3d4"; assert_eq!( verify_pubkey(&bad_pubkey.to_string()), Err(Error::invalid_params("Invalid param: WrongSize")) ); } #[test] fn test_rpc_verify_signature() { let tx = system_transaction::transfer( &Keypair::new(), &solana_sdk::pubkey::new_rand(), 20, hash(&[0]), ); assert_eq!( verify_signature(&tx.signatures[0].to_string()).unwrap(), tx.signatures[0] ); let bad_signature = "a1b2c3d4"; assert_eq!( 
        verify_signature(&bad_signature.to_string()),
        Err(Error::invalid_params("Invalid param: WrongSize"))
    );
}

fn new_bank_forks() -> (Arc<RwLock<BankForks>>, Keypair, Arc<Keypair>) {
    let GenesisConfigInfo {
        mut genesis_config,
        mint_keypair,
        voting_keypair,
    } = create_genesis_config(TEST_MINT_LAMPORTS);

    genesis_config.rent.lamports_per_byte_year = 50;
    genesis_config.rent.exemption_threshold = 2.0;
    genesis_config.epoch_schedule =
        EpochSchedule::custom(TEST_SLOTS_PER_EPOCH, TEST_SLOTS_PER_EPOCH, false);
    let bank = Bank::new_for_tests(&genesis_config);
    (
        Arc::new(RwLock::new(BankForks::new(bank))),
        mint_keypair,
        Arc::new(voting_keypair),
    )
}

#[test]
fn test_rpc_get_identity() {
    let bob_pubkey = solana_sdk::pubkey::new_rand();
    let RpcHandler { io, meta, alice, .. } = start_rpc_handler_with_tx(&bob_pubkey);

    let req = r#"{"jsonrpc":"2.0","id":1,"method":"getIdentity"}"#;
    let res = io.handle_request_sync(req, meta);
    let expected = json!({
        "jsonrpc": "2.0",
        "result": {
            "identity": alice.pubkey().to_string()
        },
        "id": 1
    });
    let expected: Response =
        serde_json::from_value(expected).expect("expected response deserialization");
    let result: Response = serde_json::from_str(&res.expect("actual response"))
        .expect("actual response deserialization");
    assert_eq!(expected, result);
}

fn test_basic_slot(method: &str, expected: Slot) {
    let bob_pubkey = solana_sdk::pubkey::new_rand();
    let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey);

    let req = format!("{{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"{}\"}}", method);
    let res = io.handle_request_sync(&req, meta);

    let json: Value = serde_json::from_str(&res.unwrap()).unwrap();
    let slot: Slot = serde_json::from_value(json["result"].clone()).unwrap();
    assert_eq!(slot, expected);
}

#[test]
fn test_rpc_get_max_slots() {
    test_basic_slot("getMaxRetransmitSlot", 42);
    test_basic_slot("getMaxShredInsertSlot", 43);
}

#[test]
fn test_rpc_get_version() {
    let bob_pubkey = solana_sdk::pubkey::new_rand();
    let RpcHandler { io, meta, ..
} = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getVersion"}"#; let res = io.handle_request_sync(req, meta); let version = solana_version::Version::default(); let expected = json!({ "jsonrpc": "2.0", "result": { "solana-core": version.to_string(), "feature-set": version.feature_set, }, "id": 1 }); let expected: Response = serde_json::from_value(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); } #[test] fn test_rpc_processor_get_block_commitment() { let exit = Arc::new(AtomicBool::new(false)); let validator_exit = create_validator_exit(&exit); let bank_forks = new_bank_forks().0; let ledger_path = get_tmp_ledger_path!(); let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); let commitment_slot0 = BlockCommitment::new([8; MAX_LOCKOUT_HISTORY + 1]); let commitment_slot1 = BlockCommitment::new([9; MAX_LOCKOUT_HISTORY + 1]); let mut block_commitment: HashMap<u64, BlockCommitment> = HashMap::new(); block_commitment .entry(0) .or_insert_with(|| commitment_slot0.clone()); block_commitment .entry(1) .or_insert_with(|| commitment_slot1.clone()); let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::new( block_commitment, 42, CommitmentSlots::new_from_slot(bank_forks.read().unwrap().highest_slot()), ))); let cluster_info = Arc::new(ClusterInfo::new( ContactInfo::default(), Arc::new(Keypair::new()), SocketAddrSpace::Unspecified, )); let tpu_address = cluster_info.my_contact_info().tpu; let (request_processor, receiver) = JsonRpcRequestProcessor::new( JsonRpcConfig::default(), None, bank_forks.clone(), block_commitment_cache, blockstore, validator_exit, RpcHealth::stub(), cluster_info, Hash::default(), None, OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), Arc::new(RwLock::new(LargestAccountsCache::new(30))), Arc::new(MaxSlots::default()), Arc::new(LeaderScheduleCache::default()), Arc::new(AtomicU64::default()), ); SendTransactionService::new::<NullTpuInfo>( tpu_address, &bank_forks, None, receiver, 1000, 1, ); assert_eq!( request_processor.get_block_commitment(0), RpcBlockCommitment { commitment: Some(commitment_slot0.commitment), total_stake: 42, } ); assert_eq!( request_processor.get_block_commitment(1), RpcBlockCommitment { commitment: Some(commitment_slot1.commitment), total_stake: 42, } ); assert_eq!( request_processor.get_block_commitment(2), RpcBlockCommitment { commitment: None, total_stake: 42, } ); } #[test] fn test_rpc_get_block_commitment() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, block_commitment_cache, .. 
} = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlockCommitment","params":[0]}"#; let res = io.handle_request_sync(req, meta.clone()); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let RpcBlockCommitment { commitment, total_stake, } = if let Response::Single(res) = result { if let Output::Success(res) = res { serde_json::from_value(res.result).unwrap() } else { panic!("Expected success"); } } else { panic!("Expected single response"); }; assert_eq!( commitment, block_commitment_cache .read() .unwrap() .get_block_commitment(0) .map(|block_commitment| block_commitment.commitment) ); assert_eq!(total_stake, 10); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlockCommitment","params":[2]}"#; let res = io.handle_request_sync(req, meta); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let commitment_response: RpcBlockCommitment<BlockCommitmentArray> = if let Response::Single(res) = result { if let Output::Success(res) = res { serde_json::from_value(res.result).unwrap() } else { panic!("Expected success"); } } else { panic!("Expected single response"); }; assert_eq!(commitment_response.commitment, None); assert_eq!(commitment_response.total_stake, 10); } #[test] fn test_get_block() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, mut meta, confirmed_block_signatures, blockhash, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlock","params":[0]}"#; let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_block: Option<EncodedConfirmedBlock> = serde_json::from_value(result["result"].clone()).unwrap(); let confirmed_block = confirmed_block.unwrap(); assert_eq!(confirmed_block.transactions.len(), 3); assert_eq!(confirmed_block.rewards, vec![]); for EncodedTransactionWithStatusMeta { transaction, meta } in confirmed_block.transactions.into_iter() { if let EncodedTransaction::Json(transaction) = transaction { if transaction.signatures[0] == confirmed_block_signatures[0].to_string() { let meta = meta.unwrap(); let transaction_recent_blockhash = match transaction.message { UiMessage::Parsed(message) => message.recent_blockhash, UiMessage::Raw(message) => message.recent_blockhash, }; assert_eq!(transaction_recent_blockhash, blockhash.to_string()); assert_eq!(meta.status, Ok(())); assert_eq!(meta.err, None); } else if transaction.signatures[0] == confirmed_block_signatures[1].to_string() { let meta = meta.unwrap(); assert_eq!( meta.err, Some(TransactionError::InstructionError( 0, InstructionError::Custom(1) )) ); assert_eq!( meta.status, Err(TransactionError::InstructionError( 0, InstructionError::Custom(1) )) ); } else { assert_eq!(meta, None); } } } let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlock","params":[0,"binary"]}"#; let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_block: Option<EncodedConfirmedBlock> = serde_json::from_value(result["result"].clone()).unwrap(); let confirmed_block = confirmed_block.unwrap(); assert_eq!(confirmed_block.transactions.len(), 3); assert_eq!(confirmed_block.rewards, vec![]); for EncodedTransactionWithStatusMeta { transaction, meta } in 
confirmed_block.transactions.into_iter() { if let EncodedTransaction::LegacyBinary(transaction) = transaction { let decoded_transaction: Transaction = deserialize(&bs58::decode(&transaction).into_vec().unwrap()).unwrap(); if decoded_transaction.signatures[0] == confirmed_block_signatures[0] { let meta = meta.unwrap(); assert_eq!(decoded_transaction.message.recent_blockhash, blockhash); assert_eq!(meta.status, Ok(())); assert_eq!(meta.err, None); } else if decoded_transaction.signatures[0] == confirmed_block_signatures[1] { let meta = meta.unwrap(); assert_eq!( meta.err, Some(TransactionError::InstructionError( 0, InstructionError::Custom(1) )) ); assert_eq!( meta.status, Err(TransactionError::InstructionError( 0, InstructionError::Custom(1) )) ); } else { assert_eq!(meta, None); } } } // disable rpc-tx-history meta.config.enable_rpc_transaction_history = false; let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlock","params":[0]}"#; let res = io.handle_request_sync(req, meta); assert_eq!( res, Some( r#"{"jsonrpc":"2.0","error":{"code":-32011,"message":"Transaction history is not available from this node"},"id":1}"#.to_string(), ) ); } #[test] fn test_get_block_config() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, confirmed_block_signatures, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getBlock","params":[0,{}]}}"#, json!(RpcBlockConfig { encoding: None, transaction_details: Some(TransactionDetails::Signatures), rewards: Some(false), commitment: None, }) ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_block: Option<UiConfirmedBlock> = serde_json::from_value(result["result"].clone()).unwrap(); let confirmed_block = confirmed_block.unwrap(); assert!(confirmed_block.transactions.is_none()); assert!(confirmed_block.rewards.is_none()); for (i, signature) in confirmed_block.signatures.unwrap()[..2].iter().enumerate() { assert_eq!(*signature, confirmed_block_signatures[i].to_string()); } let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getBlock","params":[0,{}]}}"#, json!(RpcBlockConfig { encoding: None, transaction_details: Some(TransactionDetails::None), rewards: Some(true), commitment: None, }) ); let res = io.handle_request_sync(&req, meta); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_block: Option<UiConfirmedBlock> = serde_json::from_value(result["result"].clone()).unwrap(); let confirmed_block = confirmed_block.unwrap(); assert!(confirmed_block.transactions.is_none()); assert!(confirmed_block.signatures.is_none()); assert_eq!(confirmed_block.rewards.unwrap(), vec![]); } #[test] fn test_get_block_production() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let roots = vec![0, 1, 3, 4, 8]; let RpcHandler { io, meta, block_commitment_cache, leader_pubkey, .. 
} = start_rpc_handler_with_tx_and_blockstore(&bob_pubkey, roots); block_commitment_cache .write() .unwrap() .set_highest_confirmed_root(8); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlockProduction","params":[]}"#; let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let block_production: RpcBlockProduction = serde_json::from_value(result["result"]["value"].clone()).unwrap(); assert_eq!( block_production.by_identity.get(&leader_pubkey.to_string()), Some(&(9, 5)) ); assert_eq!( block_production.range, RpcBlockProductionRange { first_slot: 0, last_slot: 8 } ); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getBlockProduction","params":[{{"identity": "{}"}}]}}"#, leader_pubkey ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let block_production: RpcBlockProduction = serde_json::from_value(result["result"]["value"].clone()).unwrap(); assert_eq!( block_production.by_identity.get(&leader_pubkey.to_string()), Some(&(9, 5)) ); assert_eq!( block_production.range, RpcBlockProductionRange { first_slot: 0, last_slot: 8 } ); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getBlockProduction","params":[{{"range": {{"firstSlot": 0, "lastSlot": 4}}, "identity": "{}"}}]}}"#, bob_pubkey ); let res = io.handle_request_sync(&req, meta); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let block_production: RpcBlockProduction = serde_json::from_value(result["result"]["value"].clone()).unwrap(); assert_eq!( block_production.by_identity.get(&leader_pubkey.to_string()), None ); assert_eq!( block_production.range, RpcBlockProductionRange { first_slot: 0, last_slot: 4 } ); } #[test] fn test_get_blocks() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let roots = vec![0, 1, 3, 4, 8]; let RpcHandler { io, meta, block_commitment_cache, .. 
} = start_rpc_handler_with_tx_and_blockstore(&bob_pubkey, roots.clone()); block_commitment_cache .write() .unwrap() .set_highest_confirmed_root(8); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocks","params":[0]}"#; let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec<Slot> = serde_json::from_value(result["result"].clone()).unwrap(); assert_eq!(confirmed_blocks, roots[1..].to_vec()); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocks","params":[2]}"#; let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec<Slot> = serde_json::from_value(result["result"].clone()).unwrap(); assert_eq!(confirmed_blocks, vec![3, 4, 8]); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocks","params":[0,4]}"#; let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec<Slot> = serde_json::from_value(result["result"].clone()).unwrap(); assert_eq!(confirmed_blocks, vec![1, 3, 4]); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocks","params":[0,7]}"#; let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec<Slot> = serde_json::from_value(result["result"].clone()).unwrap(); assert_eq!(confirmed_blocks, vec![1, 3, 4]); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocks","params":[9,11]}"#; let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec<Slot> = serde_json::from_value(result["result"].clone()).unwrap(); assert_eq!(confirmed_blocks, Vec::<Slot>::new()); block_commitment_cache .write() .unwrap() .set_highest_confirmed_root(std::u64::MAX); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getBlocks","params":[0,{}]}}"#, MAX_GET_CONFIRMED_BLOCKS_RANGE ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec<Slot> = serde_json::from_value(result["result"].clone()).unwrap(); assert_eq!(confirmed_blocks, vec![1, 3, 4, 8]); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getBlocks","params":[0,{}]}}"#, MAX_GET_CONFIRMED_BLOCKS_RANGE + 1 ); let res = io.handle_request_sync(&req, meta); assert_eq!( res, Some( r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"Slot range too large; max 500000"},"id":1}"#.to_string(), ) ); } #[test] fn test_get_blocks_with_limit() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let roots = vec![0, 1, 3, 4, 8]; let RpcHandler { io, meta, block_commitment_cache, .. 
} = start_rpc_handler_with_tx_and_blockstore(&bob_pubkey, roots); block_commitment_cache .write() .unwrap() .set_highest_confirmed_root(8); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocksWithLimit","params":[0,500001]}"#; let res = io.handle_request_sync(req, meta.clone()); assert_eq!( res, Some( r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"Limit too large; max 500000"},"id":1}"#.to_string(), ) ); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocksWithLimit","params":[0,0]}"#; let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec<Slot> = serde_json::from_value(result["result"].clone()).unwrap(); assert!(confirmed_blocks.is_empty()); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocksWithLimit","params":[2,2]}"#; let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec<Slot> = serde_json::from_value(result["result"].clone()).unwrap(); assert_eq!(confirmed_blocks, vec![3, 4]); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocksWithLimit","params":[2,3]}"#; let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec<Slot> = serde_json::from_value(result["result"].clone()).unwrap(); assert_eq!(confirmed_blocks, vec![3, 4, 8]); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocksWithLimit","params":[2,500000]}"#; let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec<Slot> = serde_json::from_value(result["result"].clone()).unwrap(); assert_eq!(confirmed_blocks, vec![3, 4, 8]); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocksWithLimit","params":[9,500000]}"#; let res = io.handle_request_sync(req, meta); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec<Slot> = serde_json::from_value(result["result"].clone()).unwrap(); assert_eq!(confirmed_blocks, Vec::<Slot>::new()); } #[test] fn test_get_block_time() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, bank, block_commitment_cache, bank_forks, .. 
} = start_rpc_handler_with_tx_and_blockstore(&bob_pubkey, vec![1, 2, 3, 4, 5, 6, 7]); let base_timestamp = bank_forks .read() .unwrap() .get(0) .unwrap() .unix_timestamp_from_genesis(); block_commitment_cache .write() .unwrap() .set_highest_confirmed_root(7); let slot_duration = slot_duration_from_slots_per_year(bank.slots_per_year()); let slot = 2; let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getBlockTime","params":[{}]}}"#, slot ); let res = io.handle_request_sync(&req, meta.clone()); let expected = format!(r#"{{"jsonrpc":"2.0","result":{},"id":1}}"#, base_timestamp); let expected: Response = serde_json::from_str(&expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); let slot = 7; let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getBlockTime","params":[{}]}}"#, slot ); let res = io.handle_request_sync(&req, meta.clone()); let expected = format!( r#"{{"jsonrpc":"2.0","result":{},"id":1}}"#, base_timestamp + (7 * slot_duration).as_secs() as i64 ); let expected: Response = serde_json::from_str(&expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); let slot = 12345; let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getBlockTime","params":[{}]}}"#, slot ); let res = io.handle_request_sync(&req, meta); let expected = r#"{"jsonrpc":"2.0","error":{"code":-32004,"message":"Block not available for slot 12345"},"id":1}"#; let expected: Response = serde_json::from_str(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); } fn advance_block_commitment_cache( block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>, bank_forks: &Arc<RwLock<BankForks>>, ) { let mut new_block_commitment = BlockCommitmentCache::new( HashMap::new(), 0, CommitmentSlots::new_from_slot(bank_forks.read().unwrap().highest_slot()), ); let mut w_block_commitment_cache = block_commitment_cache.write().unwrap(); std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment); } #[test] fn test_get_vote_accounts() { let RpcHandler { io, meta, mut bank, bank_forks, alice, leader_vote_keypair, block_commitment_cache, .. } = start_rpc_handler_with_tx(&solana_sdk::pubkey::new_rand()); assert_eq!(bank.vote_accounts().len(), 1); // Create a vote account with no stake. let alice_vote_keypair = Keypair::new(); let instructions = vote_instruction::create_account( &alice.pubkey(), &alice_vote_keypair.pubkey(), &VoteInit { node_pubkey: alice.pubkey(), authorized_voter: alice_vote_keypair.pubkey(), authorized_withdrawer: alice_vote_keypair.pubkey(), commission: 0, }, bank.get_minimum_balance_for_rent_exemption(VoteState::size_of()), ); let message = Message::new(&instructions, Some(&alice.pubkey())); let transaction = Transaction::new( &[&alice, &alice_vote_keypair], message, bank.last_blockhash(), ); bank.process_transaction(&transaction) .expect("process transaction"); assert_eq!(bank.vote_accounts().len(), 2); // Check getVoteAccounts: the bootstrap validator vote account will be delinquent as it has // stake but has never voted, and the vote account with no stake should not be present. 
{ let req = r#"{"jsonrpc":"2.0","id":1,"method":"getVoteAccounts"}"#; let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let vote_account_status: RpcVoteAccountStatus = serde_json::from_value(result["result"].clone()).unwrap(); assert!(vote_account_status.current.is_empty()); assert_eq!(vote_account_status.delinquent.len(), 1); for vote_account_info in vote_account_status.delinquent { assert_ne!(vote_account_info.activated_stake, 0); } } let mut advance_bank = || { bank.freeze(); // Votes let instructions = [ vote_instruction::vote( &leader_vote_keypair.pubkey(), &leader_vote_keypair.pubkey(), Vote { slots: vec![bank.slot()], hash: bank.hash(), timestamp: None, }, ), vote_instruction::vote( &alice_vote_keypair.pubkey(), &alice_vote_keypair.pubkey(), Vote { slots: vec![bank.slot()], hash: bank.hash(), timestamp: None, }, ), ]; bank = bank_forks.write().unwrap().insert(Bank::new_from_parent( &bank, &Pubkey::default(), bank.slot() + 1, )); advance_block_commitment_cache(&block_commitment_cache, &bank_forks); let transaction = Transaction::new_signed_with_payer( &instructions, Some(&alice.pubkey()), &[&alice, &leader_vote_keypair, &alice_vote_keypair], bank.last_blockhash(), ); bank.process_transaction(&transaction) .expect("process transaction"); }; // Advance bank to the next epoch for _ in 0..TEST_SLOTS_PER_EPOCH { advance_bank(); } let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getVoteAccounts","params":{}}}"#, json!([CommitmentConfig::processed()]) ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let vote_account_status: RpcVoteAccountStatus = serde_json::from_value(result["result"].clone()).unwrap(); // The vote account with no stake should not be present. assert!(vote_account_status.delinquent.is_empty()); // Both accounts should be active and have voting history. 
assert_eq!(vote_account_status.current.len(), 2); let leader_info = vote_account_status .current .iter() .find(|x| x.vote_pubkey == leader_vote_keypair.pubkey().to_string()) .unwrap(); assert_ne!(leader_info.activated_stake, 0); // Subtract one because the last vote always carries over to the next epoch let expected_credits = TEST_SLOTS_PER_EPOCH - MAX_LOCKOUT_HISTORY as u64 - 1; assert_eq!( leader_info.epoch_credits, vec![ (0, expected_credits, 0), (1, expected_credits + 1, expected_credits) // one vote in current epoch ] ); // Filter request based on the leader: { let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getVoteAccounts","params":{}}}"#, json!([RpcGetVoteAccountsConfig { vote_pubkey: Some(leader_vote_keypair.pubkey().to_string()), commitment: Some(CommitmentConfig::processed()), ..RpcGetVoteAccountsConfig::default() }]) ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let vote_account_status: RpcVoteAccountStatus = serde_json::from_value(result["result"].clone()).unwrap(); assert_eq!(vote_account_status.current.len(), 1); assert_eq!(vote_account_status.delinquent.len(), 0); for vote_account_info in vote_account_status.current { assert_eq!( vote_account_info.vote_pubkey, leader_vote_keypair.pubkey().to_string() ); } } // Overflow the epoch credits history and ensure only `MAX_RPC_EPOCH_CREDITS_HISTORY` // results are returned for _ in 0..(TEST_SLOTS_PER_EPOCH * (MAX_RPC_EPOCH_CREDITS_HISTORY) as u64) { advance_bank(); } let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getVoteAccounts","params":{}}}"#, json!([CommitmentConfig::processed()]) ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let vote_account_status: RpcVoteAccountStatus = serde_json::from_value(result["result"].clone()).unwrap(); assert!(vote_account_status.delinquent.is_empty()); assert!(!vote_account_status .current .iter() .any(|x| x.epoch_credits.len() != MAX_RPC_EPOCH_CREDITS_HISTORY)); // Advance bank with no voting bank.freeze(); bank_forks.write().unwrap().insert(Bank::new_from_parent( &bank, &Pubkey::default(), bank.slot() + TEST_SLOTS_PER_EPOCH, )); advance_block_commitment_cache(&block_commitment_cache, &bank_forks); // The leader vote account should now be delinquent, and the other vote account disappears // because it's inactive with no stake { let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getVoteAccounts","params":{}}}"#, json!([CommitmentConfig::processed()]) ); let res = io.handle_request_sync(&req, meta); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let vote_account_status: RpcVoteAccountStatus = serde_json::from_value(result["result"].clone()).unwrap(); assert!(vote_account_status.current.is_empty()); assert_eq!(vote_account_status.delinquent.len(), 1); for vote_account_info in vote_account_status.delinquent { assert_eq!( vote_account_info.vote_pubkey, leader_vote_keypair.pubkey().to_string() ); } } } #[test] fn test_is_finalized() { let bank = Arc::new(Bank::default_for_tests()); let ledger_path = get_tmp_ledger_path!(); let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); blockstore.set_roots(vec![0, 1].iter()).unwrap(); // Build BlockCommitmentCache with rooted slots let mut cache0 = BlockCommitment::default(); 
cache0.increase_rooted_stake(50); let mut cache1 = BlockCommitment::default(); cache1.increase_rooted_stake(40); let mut cache2 = BlockCommitment::default(); cache2.increase_rooted_stake(20); let mut block_commitment = HashMap::new(); block_commitment.entry(1).or_insert(cache0); block_commitment.entry(2).or_insert(cache1); block_commitment.entry(3).or_insert(cache2); let highest_confirmed_root = 1; let block_commitment_cache = BlockCommitmentCache::new( block_commitment, 50, CommitmentSlots { slot: bank.slot(), highest_confirmed_root, ..CommitmentSlots::default() }, ); assert!(is_finalized(&block_commitment_cache, &bank, &blockstore, 0)); assert!(is_finalized(&block_commitment_cache, &bank, &blockstore, 1)); assert!(!is_finalized( &block_commitment_cache, &bank, &blockstore, 2 )); assert!(!is_finalized( &block_commitment_cache, &bank, &blockstore, 3 )); } #[test] fn test_token_rpcs() { let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx(&solana_sdk::pubkey::new_rand()); let mut account_data = vec![0; TokenAccount::get_packed_len()]; let mint = SplTokenPubkey::new(&[2; 32]); let owner = SplTokenPubkey::new(&[3; 32]); let delegate = SplTokenPubkey::new(&[4; 32]); let token_account = TokenAccount { mint, owner, delegate: COption::Some(delegate), amount: 420, state: TokenAccountState::Initialized, is_native: COption::None, delegated_amount: 30, close_authority: COption::Some(owner), }; TokenAccount::pack(token_account, &mut account_data).unwrap(); let token_account = AccountSharedData::from(Account { lamports: 111, data: account_data.to_vec(), owner: spl_token_id(), ..Account::default() }); let token_account_pubkey = solana_sdk::pubkey::new_rand(); bank.store_account(&token_account_pubkey, &token_account); // Add the mint let mut mint_data = vec![0; Mint::get_packed_len()]; let mint_state = Mint { mint_authority: COption::Some(owner), supply: 500, decimals: 2, is_initialized: true, freeze_authority: COption::Some(owner), }; Mint::pack(mint_state, &mut mint_data).unwrap(); let mint_account = AccountSharedData::from(Account { lamports: 111, data: mint_data.to_vec(), owner: spl_token_id(), ..Account::default() }); bank.store_account(&Pubkey::from_str(&mint.to_string()).unwrap(), &mint_account); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getTokenAccountBalance","params":["{}"]}}"#, token_account_pubkey, ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let balance: UiTokenAmount = serde_json::from_value(result["result"]["value"].clone()).unwrap(); let error = f64::EPSILON; assert!((balance.ui_amount.unwrap() - 4.2).abs() < error); assert_eq!(balance.amount, 420.to_string()); assert_eq!(balance.decimals, 2); assert_eq!(balance.ui_amount_string, "4.2".to_string()); // Test non-existent token account let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getTokenAccountBalance","params":["{}"]}}"#, solana_sdk::pubkey::new_rand(), ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert!(result.get("error").is_some()); // Test get token supply, pulls supply from mint let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getTokenSupply","params":["{}"]}}"#, mint, ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response 
deserialization"); let supply: UiTokenAmount = serde_json::from_value(result["result"]["value"].clone()).unwrap(); let error = f64::EPSILON; assert!((supply.ui_amount.unwrap() - 5.0).abs() < error); assert_eq!(supply.amount, 500.to_string()); assert_eq!(supply.decimals, 2); assert_eq!(supply.ui_amount_string, "5".to_string()); // Test non-existent mint address let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getTokenSupply","params":["{}"]}}"#, solana_sdk::pubkey::new_rand(), ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert!(result.get("error").is_some()); // Add another token account with the same owner, delegate, and mint let other_token_account_pubkey = solana_sdk::pubkey::new_rand(); bank.store_account(&other_token_account_pubkey, &token_account); // Add another token account with the same owner and delegate but different mint let mut account_data = vec![0; TokenAccount::get_packed_len()]; let new_mint = SplTokenPubkey::new(&[5; 32]); let token_account = TokenAccount { mint: new_mint, owner, delegate: COption::Some(delegate), amount: 42, state: TokenAccountState::Initialized, is_native: COption::None, delegated_amount: 30, close_authority: COption::Some(owner), }; TokenAccount::pack(token_account, &mut account_data).unwrap(); let token_account = AccountSharedData::from(Account { lamports: 111, data: account_data.to_vec(), owner: spl_token_id(), ..Account::default() }); let token_with_different_mint_pubkey = solana_sdk::pubkey::new_rand(); bank.store_account(&token_with_different_mint_pubkey, &token_account); // Test getTokenAccountsByOwner with Token program id returns all accounts, regardless of Mint address let req = format!( r#"{{ "jsonrpc":"2.0", "id":1, "method":"getTokenAccountsByOwner", "params":["{}", {{"programId": "{}"}}] }}"#, owner, spl_token_id(), ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let accounts: Vec<RpcKeyedAccount> = serde_json::from_value(result["result"]["value"].clone()).unwrap(); assert_eq!(accounts.len(), 3); // Test getTokenAccountsByOwner with jsonParsed encoding doesn't return accounts with invalid mints let req = format!( r#"{{ "jsonrpc":"2.0", "id":1, "method":"getTokenAccountsByOwner", "params":["{}", {{"programId": "{}"}}, {{"encoding": "jsonParsed"}}] }}"#, owner, spl_token_id(), ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let accounts: Vec<RpcKeyedAccount> = serde_json::from_value(result["result"]["value"].clone()).unwrap(); assert_eq!(accounts.len(), 2); // Test getProgramAccounts with jsonParsed encoding returns mints, but doesn't return accounts with invalid mints let req = format!( r#"{{ "jsonrpc":"2.0", "id":1, "method":"getProgramAccounts", "params":["{}", {{"encoding": "jsonParsed"}}] }}"#, spl_token_id(), ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let accounts: Vec<RpcKeyedAccount> = serde_json::from_value(result["result"].clone()).unwrap(); assert_eq!(accounts.len(), 4); // Test returns only mint accounts let req = format!( r#"{{ "jsonrpc":"2.0", "id":1,"method":"getTokenAccountsByOwner", "params":["{}", {{"mint": 
"{}"}}] }}"#, owner, mint, ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let accounts: Vec<RpcKeyedAccount> = serde_json::from_value(result["result"]["value"].clone()).unwrap(); assert_eq!(accounts.len(), 2); // Test non-existent Mint/program id let req = format!( r#"{{ "jsonrpc":"2.0", "id":1, "method":"getTokenAccountsByOwner", "params":["{}", {{"programId": "{}"}}] }}"#, owner, solana_sdk::pubkey::new_rand(), ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert!(result.get("error").is_some()); let req = format!( r#"{{ "jsonrpc":"2.0", "id":1, "method":"getTokenAccountsByOwner", "params":["{}", {{"mint": "{}"}}] }}"#, owner, solana_sdk::pubkey::new_rand(), ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert!(result.get("error").is_some()); // Test non-existent Owner let req = format!( r#"{{ "jsonrpc":"2.0", "id":1, "method":"getTokenAccountsByOwner", "params":["{}", {{"programId": "{}"}}] }}"#, solana_sdk::pubkey::new_rand(), spl_token_id(), ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let accounts: Vec<RpcKeyedAccount> = serde_json::from_value(result["result"]["value"].clone()).unwrap(); assert!(accounts.is_empty()); // Test getTokenAccountsByDelegate with Token program id returns all accounts, regardless of Mint address let req = format!( r#"{{ "jsonrpc":"2.0", "id":1, "method":"getTokenAccountsByDelegate", "params":["{}", {{"programId": "{}"}}] }}"#, delegate, spl_token_id(), ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let accounts: Vec<RpcKeyedAccount> = serde_json::from_value(result["result"]["value"].clone()).unwrap(); assert_eq!(accounts.len(), 3); // Test returns only mint accounts let req = format!( r#"{{ "jsonrpc":"2.0", "id":1,"method": "getTokenAccountsByDelegate", "params":["{}", {{"mint": "{}"}}] }}"#, delegate, mint, ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let accounts: Vec<RpcKeyedAccount> = serde_json::from_value(result["result"]["value"].clone()).unwrap(); assert_eq!(accounts.len(), 2); // Test non-existent Mint/program id let req = format!( r#"{{ "jsonrpc":"2.0", "id":1, "method":"getTokenAccountsByDelegate", "params":["{}", {{"programId": "{}"}}] }}"#, delegate, solana_sdk::pubkey::new_rand(), ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert!(result.get("error").is_some()); let req = format!( r#"{{ "jsonrpc":"2.0", "id":1, "method":"getTokenAccountsByDelegate", "params":["{}", {{"mint": "{}"}}] }}"#, delegate, solana_sdk::pubkey::new_rand(), ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert!(result.get("error").is_some()); // Test non-existent 
Delegate let req = format!( r#"{{ "jsonrpc":"2.0", "id":1, "method":"getTokenAccountsByDelegate", "params":["{}", {{"programId": "{}"}}] }}"#, solana_sdk::pubkey::new_rand(), spl_token_id(), ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let accounts: Vec<RpcKeyedAccount> = serde_json::from_value(result["result"]["value"].clone()).unwrap(); assert!(accounts.is_empty()); // Add new_mint, and another token account on new_mint with different balance let mut mint_data = vec![0; Mint::get_packed_len()]; let mint_state = Mint { mint_authority: COption::Some(owner), supply: 500, decimals: 2, is_initialized: true, freeze_authority: COption::Some(owner), }; Mint::pack(mint_state, &mut mint_data).unwrap(); let mint_account = AccountSharedData::from(Account { lamports: 111, data: mint_data.to_vec(), owner: spl_token_id(), ..Account::default() }); bank.store_account( &Pubkey::from_str(&new_mint.to_string()).unwrap(), &mint_account, ); let mut account_data = vec![0; TokenAccount::get_packed_len()]; let token_account = TokenAccount { mint: new_mint, owner, delegate: COption::Some(delegate), amount: 10, state: TokenAccountState::Initialized, is_native: COption::None, delegated_amount: 30, close_authority: COption::Some(owner), }; TokenAccount::pack(token_account, &mut account_data).unwrap(); let token_account = AccountSharedData::from(Account { lamports: 111, data: account_data.to_vec(), owner: spl_token_id(), ..Account::default() }); let token_with_smaller_balance = solana_sdk::pubkey::new_rand(); bank.store_account(&token_with_smaller_balance, &token_account); // Test largest token accounts let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getTokenLargestAccounts","params":["{}"]}}"#, new_mint, ); let res = io.handle_request_sync(&req, meta); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let largest_accounts: Vec<RpcTokenAccountBalance> = serde_json::from_value(result["result"]["value"].clone()).unwrap(); assert_eq!( largest_accounts, vec![ RpcTokenAccountBalance { address: token_with_different_mint_pubkey.to_string(), amount: UiTokenAmount { ui_amount: Some(0.42), decimals: 2, amount: "42".to_string(), ui_amount_string: "0.42".to_string(), } }, RpcTokenAccountBalance { address: token_with_smaller_balance.to_string(), amount: UiTokenAmount { ui_amount: Some(0.1), decimals: 2, amount: "10".to_string(), ui_amount_string: "0.1".to_string(), } } ] ); } #[test] fn test_token_parsing() { let RpcHandler { io, meta, bank, .. 
} = start_rpc_handler_with_tx(&solana_sdk::pubkey::new_rand()); let mut account_data = vec![0; TokenAccount::get_packed_len()]; let mint = SplTokenPubkey::new(&[2; 32]); let owner = SplTokenPubkey::new(&[3; 32]); let delegate = SplTokenPubkey::new(&[4; 32]); let token_account = TokenAccount { mint, owner, delegate: COption::Some(delegate), amount: 420, state: TokenAccountState::Initialized, is_native: COption::Some(10), delegated_amount: 30, close_authority: COption::Some(owner), }; TokenAccount::pack(token_account, &mut account_data).unwrap(); let token_account = AccountSharedData::from(Account { lamports: 111, data: account_data.to_vec(), owner: spl_token_id(), ..Account::default() }); let token_account_pubkey = solana_sdk::pubkey::new_rand(); bank.store_account(&token_account_pubkey, &token_account); // Add the mint let mut mint_data = vec![0; Mint::get_packed_len()]; let mint_state = Mint { mint_authority: COption::Some(owner), supply: 500, decimals: 2, is_initialized: true, freeze_authority: COption::Some(owner), }; Mint::pack(mint_state, &mut mint_data).unwrap(); let mint_account = AccountSharedData::from(Account { lamports: 111, data: mint_data.to_vec(), owner: spl_token_id(), ..Account::default() }); bank.store_account(&Pubkey::from_str(&mint.to_string()).unwrap(), &mint_account); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getAccountInfo","params":["{}", {{"encoding": "jsonParsed"}}]}}"#, token_account_pubkey, ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!( result["result"]["value"]["data"], json!({ "program": "spl-token", "space": TokenAccount::get_packed_len(), "parsed": { "type": "account", "info": { "mint": mint.to_string(), "owner": owner.to_string(), "tokenAmount": { "uiAmount": 4.2, "decimals": 2, "amount": "420", "uiAmountString": "4.2", }, "delegate": delegate.to_string(), "state": "initialized", "isNative": true, "rentExemptReserve": { "uiAmount": 0.1, "decimals": 2, "amount": "10", "uiAmountString": "0.1", }, "delegatedAmount": { "uiAmount": 0.3, "decimals": 2, "amount": "30", "uiAmountString": "0.3", }, "closeAuthority": owner.to_string(), } } }) ); // Test Mint let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getAccountInfo","params":["{}", {{"encoding": "jsonParsed"}}]}}"#, mint, ); let res = io.handle_request_sync(&req, meta); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!( result["result"]["value"]["data"], json!({ "program": "spl-token", "space": Mint::get_packed_len(), "parsed": { "type": "mint", "info": { "mintAuthority": owner.to_string(), "decimals": 2, "supply": "500".to_string(), "isInitialized": true, "freezeAuthority": owner.to_string(), } } }) ); } #[test] fn test_get_spl_token_owner_filter() { let owner = Pubkey::new_unique(); assert_eq!( get_spl_token_owner_filter( &Pubkey::from_str("TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA").unwrap(), &[ RpcFilterType::Memcmp(Memcmp { offset: 32, bytes: MemcmpEncodedBytes::Bytes(owner.to_bytes().to_vec()), encoding: None }), RpcFilterType::DataSize(165) ], ) .unwrap(), owner ); // Filtering on mint instead of owner assert!(get_spl_token_owner_filter( &Pubkey::from_str("TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA").unwrap(), &[ RpcFilterType::Memcmp(Memcmp { offset: 0, bytes: MemcmpEncodedBytes::Bytes(owner.to_bytes().to_vec()), encoding: None }), RpcFilterType::DataSize(165) ], ) 
.is_none()); // Wrong program id assert!(get_spl_token_owner_filter( &Pubkey::new_unique(), &[ RpcFilterType::Memcmp(Memcmp { offset: 32, bytes: MemcmpEncodedBytes::Bytes(owner.to_bytes().to_vec()), encoding: None }), RpcFilterType::DataSize(165) ], ) .is_none()); } #[test] fn test_rpc_single_gossip() { let exit = Arc::new(AtomicBool::new(false)); let validator_exit = create_validator_exit(&exit); let ledger_path = get_tmp_ledger_path!(); let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default())); let cluster_info = Arc::new(ClusterInfo::new( ContactInfo::default(), Arc::new(Keypair::new()), SocketAddrSpace::Unspecified, )); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(100); let bank = Bank::new_for_tests(&genesis_config); let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); let bank0 = bank_forks.read().unwrap().get(0).unwrap().clone(); let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1); bank_forks.write().unwrap().insert(bank1); let bank1 = bank_forks.read().unwrap().get(1).unwrap().clone(); let bank2 = Bank::new_from_parent(&bank1, &Pubkey::default(), 2); bank_forks.write().unwrap().insert(bank2); let bank2 = bank_forks.read().unwrap().get(2).unwrap().clone(); let bank3 = Bank::new_from_parent(&bank2, &Pubkey::default(), 3); bank_forks.write().unwrap().insert(bank3); let optimistically_confirmed_bank = OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); let mut pending_optimistically_confirmed_banks = HashSet::new(); let subscriptions = Arc::new(RpcSubscriptions::new_for_tests( &exit, bank_forks.clone(), block_commitment_cache.clone(), optimistically_confirmed_bank.clone(), )); let (meta, _receiver) = JsonRpcRequestProcessor::new( JsonRpcConfig::default(), None, bank_forks.clone(), block_commitment_cache, blockstore, validator_exit, RpcHealth::stub(), cluster_info, Hash::default(), None, optimistically_confirmed_bank.clone(), Arc::new(RwLock::new(LargestAccountsCache::new(30))), Arc::new(MaxSlots::default()), Arc::new(LeaderScheduleCache::default()), Arc::new(AtomicU64::default()), ); let mut io = MetaIoHandler::default(); io.extend_with(rpc_minimal::MinimalImpl.to_delegate()); io.extend_with(rpc_full::FullImpl.to_delegate()); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getSlot","params":[{"commitment":"confirmed"}]}"#; let res = io.handle_request_sync(req, meta.clone()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let slot: Slot = serde_json::from_value(json["result"].clone()).unwrap(); assert_eq!(slot, 0); let mut highest_confirmed_slot: Slot = 0; let mut last_notified_confirmed_slot: Slot = 0; OptimisticallyConfirmedBankTracker::process_notification( BankNotification::OptimisticallyConfirmed(2), &bank_forks, &optimistically_confirmed_bank, &subscriptions, &mut pending_optimistically_confirmed_banks, &mut last_notified_confirmed_slot, &mut highest_confirmed_slot, &None, ); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getSlot","params":[{"commitment": "confirmed"}]}"#; let res = io.handle_request_sync(req, meta.clone()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let slot: Slot = serde_json::from_value(json["result"].clone()).unwrap(); assert_eq!(slot, 2); // Test rollback does not appear to happen, even if slots are notified out of order OptimisticallyConfirmedBankTracker::process_notification( BankNotification::OptimisticallyConfirmed(1), &bank_forks, &optimistically_confirmed_bank, 
&subscriptions, &mut pending_optimistically_confirmed_banks, &mut last_notified_confirmed_slot, &mut highest_confirmed_slot, &None, ); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getSlot","params":[{"commitment": "confirmed"}]}"#; let res = io.handle_request_sync(req, meta.clone()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let slot: Slot = serde_json::from_value(json["result"].clone()).unwrap(); assert_eq!(slot, 2); // Test bank will only be cached when frozen OptimisticallyConfirmedBankTracker::process_notification( BankNotification::OptimisticallyConfirmed(3), &bank_forks, &optimistically_confirmed_bank, &subscriptions, &mut pending_optimistically_confirmed_banks, &mut last_notified_confirmed_slot, &mut highest_confirmed_slot, &None, ); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getSlot","params":[{"commitment": "confirmed"}]}"#; let res = io.handle_request_sync(req, meta.clone()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let slot: Slot = serde_json::from_value(json["result"].clone()).unwrap(); assert_eq!(slot, 2); // Test freezing an optimistically confirmed bank will update cache let bank3 = bank_forks.read().unwrap().get(3).unwrap().clone(); OptimisticallyConfirmedBankTracker::process_notification( BankNotification::Frozen(bank3), &bank_forks, &optimistically_confirmed_bank, &subscriptions, &mut pending_optimistically_confirmed_banks, &mut last_notified_confirmed_slot, &mut highest_confirmed_slot, &None, ); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getSlot","params":[{"commitment": "confirmed"}]}"#; let res = io.handle_request_sync(req, meta); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let slot: Slot = serde_json::from_value(json["result"].clone()).unwrap(); assert_eq!(slot, 3); } #[test] fn test_worst_case_encoded_tx_goldens() { let ff_tx = vec![0xffu8; PACKET_DATA_SIZE]; let tx58 = bs58::encode(&ff_tx).into_string(); assert_eq!(tx58.len(), MAX_BASE58_SIZE); let tx64 = base64::encode(&ff_tx); assert_eq!(tx64.len(), MAX_BASE64_SIZE); } #[test] fn test_decode_and_deserialize_too_large_payloads_fail() { // +2 because +1 still fits in base64 encoded worst-case let too_big = PACKET_DATA_SIZE + 2; let tx_ser = vec![0xffu8; too_big]; let tx58 = bs58::encode(&tx_ser).into_string(); let tx58_len = tx58.len(); let expect58 = Error::invalid_params(format!( "encoded solana_sdk::transaction::Transaction too large: {} bytes (max: encoded/raw {}/{})", tx58_len, MAX_BASE58_SIZE, PACKET_DATA_SIZE, )); assert_eq!( decode_and_deserialize::<Transaction>(tx58, UiTransactionEncoding::Base58).unwrap_err(), expect58 ); let tx64 = base64::encode(&tx_ser); let tx64_len = tx64.len(); let expect64 = Error::invalid_params(format!( "encoded solana_sdk::transaction::Transaction too large: {} bytes (max: encoded/raw {}/{})", tx64_len, MAX_BASE64_SIZE, PACKET_DATA_SIZE, )); assert_eq!( decode_and_deserialize::<Transaction>(tx64, UiTransactionEncoding::Base64).unwrap_err(), expect64 ); let too_big = PACKET_DATA_SIZE + 1; let tx_ser = vec![0x00u8; too_big]; let tx58 = bs58::encode(&tx_ser).into_string(); let expect = Error::invalid_params(format!( "encoded solana_sdk::transaction::Transaction too large: {} bytes (max: {} bytes)", too_big, PACKET_DATA_SIZE )); assert_eq!( decode_and_deserialize::<Transaction>(tx58, UiTransactionEncoding::Base58).unwrap_err(), expect ); let tx64 = base64::encode(&tx_ser); assert_eq!( decode_and_deserialize::<Transaction>(tx64, UiTransactionEncoding::Base64).unwrap_err(), expect ); } #[test] fn 
test_sanitize_unsanitary() { let unsanitary_tx58 = "ju9xZWuDBX4pRxX2oZkTjxU5jB4SSTgEGhX8bQ8PURNzyzqKMPPpNvWihx8zUe\ FfrbVNoAaEsNKZvGzAnTDy5bhNT9kt6KFCTBixpvrLCzg4M5UdFUQYrn1gdgjX\ pLHxcaShD81xBNaFDgnA2nkkdHnKtZt4hVSfKAmw3VRZbjrZ7L2fKZBx21CwsG\ hD6onjM2M3qZW5C8J6d1pj41MxKmZgPBSha3MyKkNLkAGFASK" .to_string(); let unsanitary_versioned_tx = decode_and_deserialize::<VersionedTransaction>( unsanitary_tx58, UiTransactionEncoding::Base58, ) .unwrap() .1; let expect58 = Error::invalid_params( "invalid transaction: Transaction failed to sanitize accounts offsets correctly" .to_string(), ); assert_eq!( sanitize_transaction(unsanitary_versioned_tx).unwrap_err(), expect58 ); } }
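// Editorial sketch (hypothetical helper name, not from the original file): the
// worst-case goldens test above pins the maximum encoded transaction sizes.
// Base64 always encodes n bytes to exactly 4 * ceil(n / 3) characters, and an
// all-0xFF payload is the longest possible base58 encoding for a given byte
// length, which is what makes these constants safe upper bounds for the
// request-size checks exercised by decode_and_deserialize.
fn worst_case_base64_len(n: usize) -> usize {
    4 * ((n + 2) / 3)
}
// E.g., assuming PACKET_DATA_SIZE is 1232 bytes, worst_case_base64_len(1232)
// yields 1644, matching the MAX_BASE64_SIZE golden checked above.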
//! Provides the [`GeneralIntegrator`](crate::GeneralIntegrator) trait

#[doc(hidden)]
mod integrate;
#[doc(hidden)]
mod runge_kutta_4th;
#[cfg(test)]
mod test_method;

use anyhow::{self, Context};
use nalgebra::DVector;
use numeric_literals::replace_float_literals;

use crate::prepare::prepare;
use crate::{Float, Result, ResultExt, Token};

pub(self) use integrate::integrate;
pub(self) use runge_kutta_4th::runge_kutta_4th;

/// General integrators
pub enum Integrators {
    /// 4th-order Runge-Kutta method
    RungeKutta4th,
}

/// A general integrator for a system of 1st-order ODEs
pub trait Integrator<F: Float> {
    /// Update the current state as defined by a
    /// system of 1st-order ODEs, return the result
    ///
    /// Arguments:
    /// * `t` --- Current time moment;
    /// * `x` --- Current state of the system.
    fn update(&self, t: F, x: &[F]) -> anyhow::Result<Vec<F>>;

    // The rest of the methods are defined by these macros
    integrate!();
    prepare!();
    runge_kutta_4th!();
}
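// Illustrative sketch (not part of the original crate): one way a caller
// might implement `Integrator` for the scalar ODE dx/dt = -x. This assumes
// the crate's `Float` trait is implemented for `f64`, and that the
// macro-generated methods (`integrate`, `prepare`, `runge_kutta_4th`)
// supply default bodies, so only `update` needs a definition.
struct ExponentialDecay;

impl Integrator<f64> for ExponentialDecay {
    fn update(&self, _t: f64, x: &[f64]) -> anyhow::Result<Vec<f64>> {
        // Right-hand side of the system: a single equation, dx/dt = -x
        Ok(x.iter().map(|&xi| -xi).collect())
    }
}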
24.902439
70
0.674829
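In the module above, the actual stepping code is generated by the integrate!(), prepare!() and runge_kutta_4th!() macros, so it is not visible here. As a reference point, here is a minimal, self-contained sketch of one classic RK4 step for dx/dt = f(t, x); the free function below is an illustrative assumption, not the crate's API:

// One classic 4th-order Runge-Kutta step of size h for the system dx/dt = f(t, x).
fn rk4_step(f: impl Fn(f64, &[f64]) -> Vec<f64>, t: f64, x: &[f64], h: f64) -> Vec<f64> {
    // Helper: x + s * k, elementwise.
    let add = |x: &[f64], k: &[f64], s: f64| -> Vec<f64> {
        x.iter().zip(k).map(|(xi, ki)| xi + s * ki).collect()
    };
    let k1 = f(t, x);
    let k2 = f(t + h / 2.0, &add(x, &k1, h / 2.0));
    let k3 = f(t + h / 2.0, &add(x, &k2, h / 2.0));
    let k4 = f(t + h, &add(x, &k3, h));
    (0..x.len())
        .map(|i| x[i] + h / 6.0 * (k1[i] + 2.0 * k2[i] + 2.0 * k3[i] + k4[i]))
        .collect()
}

fn main() {
    // One step of dx/dt = x from x(0) = 1 with h = 0.1 should approximate e^0.1.
    let next = rk4_step(|_t, x| vec![x[0]], 0.0, &[1.0], 0.1);
    assert!((next[0] - 0.1f64.exp()).abs() < 1e-6);
}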
33e066148a1a9d5b674835a4debdcf9a891c073f
2,358
// Definition for singly-linked list.
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct ListNode {
    pub val: i32,
    pub next: Option<Box<ListNode>>,
}

impl ListNode {
    #[inline]
    fn new(val: i32) -> Self {
        ListNode { next: None, val }
    }
}

struct Solution;

impl Solution {
    pub fn add_two_numbers(
        l1: Option<Box<ListNode>>,
        l2: Option<Box<ListNode>>,
    ) -> Option<Box<ListNode>> {
        // Build a list to hold the sum of l1 and l2;
        // `dummy` acts as a virtual head node.
        let mut dummy = ListNode::new(0);
        // Keep a carry, initialized to 0.
        // Adding two decimal digits can only carry 0 or 1:
        // e.g. 7 + 8 = 15 carries 1, while 2 + 3 = 6 carries 0.
        let mut carry_bit = 0;
        let (mut p, mut q) = (l1, l2);
        let (mut v1, mut v2) = (Vec::new(), Vec::new());
        while p.is_some() || q.is_some() {
            // Collect the digits of l1
            if let Some(v) = p {
                v1.push(v.val);
                p = v.next;
            }
            // Collect the digits of l2
            if let Some(v) = q {
                v2.push(v.val);
                q = v.next;
            }
        }
        // Digits were pushed most-significant first, so popping yields the
        // least-significant digit; prepend each new node to the result.
        while !v1.is_empty() || !v2.is_empty() || carry_bit > 0 {
            let mut sum = carry_bit;
            if !v1.is_empty() {
                sum += v1.pop().unwrap();
            }
            if !v2.is_empty() {
                sum += v2.pop().unwrap();
            }
            let mut node = ListNode::new(sum % 10);
            node.next = dummy.next.take();
            dummy.next = Some(Box::new(node));
            carry_bit = sum / 10;
        }
        dummy.next
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn it_works() {
        assert_eq!(
            Solution::add_two_numbers(linked![2, 4, 3], linked![5, 6, 4]),
            linked![8, 0, 7]
        );
        assert_eq!(
            Solution::add_two_numbers(linked![5], linked![5]),
            linked![1, 0]
        );
    }
}

// helper function for tests
pub fn to_list(vec: Vec<i32>) -> Option<Box<ListNode>> {
    let mut current = None;
    for &v in vec.iter().rev() {
        let mut node = ListNode::new(v);
        node.next = current;
        current = Some(Box::new(node));
    }
    current
}

#[macro_export]
macro_rules! linked {
    ($($e:expr),*) => {to_list(vec![$($e.to_owned()), *])};
    ($($e:expr,)*) => {to_list(vec![$($e.to_owned()), *])};
}
23.58
107
0.467769
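The digit/carry split in the solution above is the whole trick: each column's digit is sum % 10 and the carry into the next column is sum / 10. In isolation (a standalone check, not part of the file above):

fn main() {
    let (d1, d2, carry_in) = (7, 8, 1);
    let sum = d1 + d2 + carry_in; // 16
    assert_eq!(sum % 10, 6); // digit written to the result node
    assert_eq!(sum / 10, 1); // carry propagated to the next column
}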
64fd451241fd5d24772c1b619457c4e8df8c773d
1,471
use std::{
    env, fs,
    path::{Path, PathBuf},
};

#[cfg(windows)]
use winres::WindowsResource;

const COPY_DIR: &str = "assets";

/// A helper function for recursively copying a directory.
fn copy_dir<P, Q>(from: P, to: Q)
where
    P: AsRef<Path>,
    Q: AsRef<Path>,
{
    let to = to.as_ref().to_path_buf();

    for path in fs::read_dir(from).unwrap() {
        let path = path.unwrap().path();
        let to = to.clone().join(path.file_name().unwrap());

        if path.is_file() {
            fs::copy(&path, to).unwrap();
        } else if path.is_dir() {
            if !to.exists() {
                fs::create_dir(&to).unwrap();
            }
            copy_dir(&path, to);
        }
    }
}

fn main() {
    // Locate the output directory: ../target/<profile>/assets relative to the crate
    let profile = env::var("PROFILE").unwrap();
    let out = env::current_dir()
        .unwrap()
        .join(PathBuf::from(format!("../target/{}/{}", profile, COPY_DIR)));

    // If it already exists in the output directory, delete it and start over
    if out.exists() {
        fs::remove_dir_all(&out).unwrap();
    }

    // Create the out directory
    fs::create_dir(&out).unwrap();

    // Copy the directory
    copy_dir(COPY_DIR, &out);

    // Add an icon to the executable
    #[cfg(windows)]
    {
        WindowsResource::new()
            // This path can be absolute, or relative to your crate root.
            .set_icon("assets/icon.ico")
            .compile()
            .unwrap();
    }
}
23.349206
76
0.541128
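The build script above derives the target directory from the current directory plus the PROFILE variable, a common workaround because cargo does not expose the final target path directly. For reference, a sketch of the variables cargo does guarantee to set for build scripts (OUT_DIR is the supported location for generated artifacts):

// build.rs sketch: reading the environment cargo provides (assumes execution under cargo).
fn main() {
    let out_dir = std::env::var("OUT_DIR").unwrap(); // per-crate scratch directory
    let profile = std::env::var("PROFILE").unwrap(); // "debug" or "release"
    println!("cargo:warning=OUT_DIR={}, PROFILE={}", out_dir, profile);
}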
8f17f6416a695e3443861ff042513e7c08b63357
3,681
use std::path::PathBuf;

use pop_launcher::IconSource;
use serde::Deserialize;

use crate::app::active_mode::WEB_CONFIG;
use crate::db::desktop_entry::DesktopEntryEntity;
use crate::db::run::RunCommandEntity;
use crate::db::web::WebEntity;
use crate::entries::AsEntry;
use crate::freedesktop::{Extension, IconPath};

lazy_static! {
    static ref TERMINAL_ICON: Option<IconSource> = get_plugin_icon("terminal/plugin.ron");
    static ref WEB_ICON: Option<IconSource> = get_plugin_icon("web/plugin.ron");
}

#[derive(Deserialize)]
struct PluginConfig {
    icon: IconSource,
}

impl<'a> AsEntry<'a> for DesktopEntryEntity {
    fn get_display_name(&self) -> &str {
        self.name.as_str()
    }

    fn get_icon(&self) -> Option<IconPath> {
        IconPath::from_path(&self.icon)
    }
}

impl<'a> AsEntry<'a> for RunCommandEntity {
    fn get_display_name(&self) -> &str {
        self.command.as_str()
    }

    fn get_icon(&self) -> Option<IconPath> {
        IconPath::from_icon_source(TERMINAL_ICON.as_ref())
    }
}

impl<'a> AsEntry<'a> for WebEntity {
    fn get_display_name(&self) -> &str {
        self.query.as_str()
    }

    fn get_icon(&self) -> Option<IconPath> {
        WEB_CONFIG
            .as_ref()
            .map(|config| {
                config
                    .rules
                    .iter()
                    .find(|rule| rule.matches.contains(&self.kind))
            })
            .flatten()
            // FIXME: see web/config.ron
            .map(|item| item.queries.first().unwrap().name.to_owned())
            .map(|web_query_kind| {
                (
                    dirs::cache_dir().unwrap().join("pop-launcher"),
                    web_query_kind,
                )
            })
            .map(|(path, filename)| {
                // Unfortunately we need to copy .ico files to the png extension
                // for iced to render the icon
                let path = path.join(format!("{}.png", &filename));
                return if path.exists() {
                    Some(IconPath {
                        path,
                        extension: Extension::Png,
                    })
                } else if path.with_extension("ico").exists() {
                    ico_to_png(path.with_extension("ico"));
                    Some(IconPath {
                        path,
                        extension: Extension::Png,
                    })
                } else {
                    IconPath::from_icon_source(WEB_ICON.as_ref())
                };
            })
            .flatten()
    }
}

fn get_plugin_icon(plugin: &str) -> Option<IconSource> {
    let path = pop_launcher::plugin_paths()
        .map(|path| path.as_ref().join(plugin))
        .find(|path| path.exists());

    path.map(std::fs::read_to_string)
        .map(Result::ok)
        .flatten()
        .map(|plugin| ron::from_str::<PluginConfig>(&plugin))
        .map(Result::ok)
        .flatten()
        .map(|plugin| plugin.icon)
}

// FIXME: This should be removed
fn ico_to_png(path: PathBuf) {
    let file = std::fs::File::open(&path).unwrap();
    match ico::IconDir::read(file) {
        Ok(icon) => {
            for entry in icon.entries() {
                if !entry.is_png() {
                    let image = entry.decode().unwrap();
                    let file = std::fs::File::create(&path.with_extension("png")).unwrap();
                    image.write_png(file).unwrap();
                }
            }
        }
        Err(_) => {
            // We were unable to read the icon, it's probably a png
            std::fs::copy(&path, &path.with_extension("png")).unwrap();
        }
    }
}
29.926829
91
0.516164
d53aa07541d11c24e06d0189cc436979816ead5e
147
use crate::multiboot::{self, info::memory::MemoryMap};

pub fn try_get() -> Option<MemoryMap<'static>> {
    multiboot::info::get().memory_map()
}
24.5
54
0.673469
db000bd2f21e10388c8005788fce359121d38bd0
996
// configure default clippy lints
#![deny(clippy::correctness)]
#![warn(clippy::complexity, clippy::style, clippy::perf, clippy::pedantic)]
// disable some pedantic lints
#![allow(
    clippy::cast_possible_truncation,
    clippy::cast_possible_wrap,
    clippy::cast_precision_loss,
    clippy::cast_sign_loss,
    clippy::default_trait_access,
    clippy::missing_errors_doc,
    clippy::module_name_repetitions,
    clippy::must_use_candidate,
    clippy::non_ascii_literal,
    clippy::option_if_let_else,
    clippy::similar_names,
    clippy::single_match_else,
    clippy::type_repetition_in_bounds,
    clippy::wildcard_imports
)]
// enable some restriction lints
#![warn(clippy::print_stdout, clippy::print_stderr, clippy::dbg_macro)]

pub mod collections;
pub mod dataset;
pub mod error;
pub mod operations;
pub mod plots;
pub mod primitives;
pub mod provenance;
pub mod raster;
pub mod spatial_reference;
pub mod util;

/// Compiles Geo Engine Pro
#[cfg(feature = "pro")]
pub mod pro;
26.210526
75
0.746988
ccf881966f54ad1bcd7cc4a189f93381279b08bf
5,356
// SPDX-License-Identifier: Apache-2.0

use crate::backend::probe::x86_64::{CpuId, Vendor};
use crate::backend::{self, Datum, Keep};
use crate::binary::Component;

use std::arch::x86_64::__cpuid_count;
use std::fs::OpenOptions;
use std::io::Result;
use std::mem::transmute;
use std::path::PathBuf;
use std::str::from_utf8;
use std::sync::Arc;

const CPUIDS: &[CpuId] = &[
    CpuId {
        name: "CPU Manufacturer",
        leaf: 0x00000000,
        subl: 0x00000000,
        func: |res| {
            let name: [u8; 12] = unsafe { transmute([res.ebx, res.edx, res.ecx]) };
            let name = from_utf8(&name[..]).unwrap();
            (name == "AuthenticAMD", Some(name.into()))
        },
        vend: None,
    },
    CpuId {
        name: " Microcode support",
        leaf: 0x80000002,
        subl: 0x00000000,
        func: |_res| {
            let cpu_name = {
                let mut bytestr = Vec::with_capacity(48);
                for cpuid in 0x8000_0002_u32..=0x8000_0004_u32 {
                    let cpuid = unsafe { __cpuid_count(cpuid, 0x0000_0000) };
                    let mut bytes: Vec<u8> = [cpuid.eax, cpuid.ebx, cpuid.ecx, cpuid.edx]
                        .iter()
                        .flat_map(|r| r.to_le_bytes().to_vec())
                        .collect();
                    bytestr.append(&mut bytes);
                }
                String::from_utf8(bytestr).unwrap().trim().to_string()
            };
            (cpu_name.to_uppercase().contains("EPYC"), Some(cpu_name))
        },
        vend: Some(Vendor::Amd),
    },
    CpuId {
        name: " Secure Memory Encryption (SME)",
        leaf: 0x8000001f,
        subl: 0x00000000,
        func: |res| (res.eax & 0x1 != 0, None),
        vend: Some(Vendor::Amd),
    },
    CpuId {
        name: " Physical address bit reduction",
        leaf: 0x8000001f,
        subl: 0x00000000,
        func: |res| {
            // Bits 11:6 of EBX; the mask must be parenthesized because `>>`
            // binds tighter than `&` in Rust.
            let field = (res.ebx & 0b1111_1100_0000) >> 6;
            (true, Some(format!("{}", field)))
        },
        vend: Some(Vendor::Amd),
    },
    CpuId {
        name: " C-bit location in page table entry",
        leaf: 0x8000001f,
        subl: 0x00000000,
        func: |res| {
            // Bits 5:0 of EBX hold the C-bit position, a six-bit field.
            let field = res.ebx & 0b11_1111;
            (true, Some(format!("{}", field)))
        },
        vend: Some(Vendor::Amd),
    },
    CpuId {
        name: " Secure Encrypted Virtualization (SEV)",
        leaf: 0x8000001f,
        subl: 0x00000000,
        func: |res| (res.eax & (1 << 1) != 0, None),
        vend: Some(Vendor::Amd),
    },
    CpuId {
        name: " Number of encrypted guests supported simultaneously",
        leaf: 0x8000001f,
        subl: 0x00000000,
        func: |res| (true, Some(format!("{}", res.ecx))),
        vend: Some(Vendor::Amd),
    },
    CpuId {
        name: " Minimum ASID value for SEV-enabled, SEV-ES disabled guest",
        leaf: 0x8000001f,
        subl: 0x00000000,
        func: |res| (true, Some(format!("{}", res.edx))),
        vend: Some(Vendor::Amd),
    },
    CpuId {
        name: " Secure Encrypted Virtualization Encrypted State (SEV-ES)",
        leaf: 0x8000001f,
        subl: 0x00000000,
        func: |res| (res.eax & (1 << 3) != 0, None),
        vend: Some(Vendor::Amd),
    },
    CpuId {
        name: " Page Flush MSR available",
        leaf: 0x8000001f,
        subl: 0x00000000,
        func: |res| (res.eax & (1 << 2) != 0, None),
        vend: Some(Vendor::Amd),
    },
];

fn dev_sev() -> Datum {
    Datum {
        name: "Driver".into(),
        pass: std::path::Path::new("/dev/sev").exists(),
        info: Some("/dev/sev".into()),
        mesg: None,
    }
}

fn sev_enabled_in_kernel() -> Datum {
    let mut datum = Datum {
        name: " SEV is enabled in host kernel".into(),
        pass: false,
        info: None,
        mesg: None,
    };

    let mod_param = "/sys/module/kvm_amd/parameters/sev";
    if std::path::Path::new(mod_param).exists() {
        if let Ok(val) = std::fs::read_to_string(mod_param) {
            datum.pass = val.trim() == "1";
        }
    }

    datum
}

fn dev_sev_readable() -> Datum {
    let opts = OpenOptions::new().read(true).open("/dev/sev");

    Datum {
        name: " /dev/sev is readable by user".into(),
        pass: opts.is_ok(),
        info: None,
        mesg: None,
    }
}

fn dev_sev_writable() -> Datum {
    let opts = OpenOptions::new().write(true).open("/dev/sev");

    Datum {
        name: " /dev/sev is writable by user".into(),
        pass: opts.is_ok(),
        info: None,
        mesg: None,
    }
}

fn has_kvm_support() -> Datum {
    use crate::backend::Backend;
    Datum {
        name: "KVM support".into(),
        pass: backend::kvm::Backend.have(),
        info: None,
        mesg: None,
    }
}

pub struct Backend;

impl backend::Backend for Backend {
    fn data(&self) -> Vec<Datum> {
        let mut data = vec![];
        data.extend(CPUIDS.iter().map(|c| c.into()));
        data.push(dev_sev());
        data.push(sev_enabled_in_kernel());
        data.push(dev_sev_readable());
        data.push(dev_sev_writable());
        data.push(has_kvm_support());
        data
    }

    fn shim(&self) -> Result<PathBuf> {
        unimplemented!()
    }

    fn build(&self, _shim: Component, _code: Component) -> Result<Arc<dyn Keep>> {
        unimplemented!()
    }
}
27.608247
89
0.518297
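The parenthesization in the "Physical address bit reduction" probe above matters because Rust's `>>` binds tighter than `&`. A standalone check of both readings:

fn main() {
    let ebx: u32 = 0b1010_1100_0000;
    // Without parentheses the shift applies to the constant, not the masked value:
    assert_eq!(ebx & 0b1111_1100_0000 >> 6, ebx & 0b0011_1111);
    // With parentheses we extract bits 11:6, the intended field:
    assert_eq!((ebx & 0b1111_1100_0000) >> 6, 0b10_1011);
}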
c1b2ab7252a5b37ac878ef7987ede6e3a05aa134
2,037
#![feature(test)]

extern crate test;

use morgan_runtime::treasury::*;
use morgan_interface::account::Account;
use morgan_interface::genesis_block::create_genesis_block;
use morgan_interface::bvm_address::BvmAddr;
use std::sync::Arc;
use test::Bencher;

fn deposit_many(treasury: &Treasury, addresses: &mut Vec<BvmAddr>, num: usize) {
    for t in 0..num {
        let address = BvmAddr::new_rand();
        let account = Account::new((t + 1) as u64, 0, 0, &Account::default().owner);
        addresses.push(address.clone());
        assert!(treasury.get_account(&address).is_none());
        treasury.deposit(&address, (t + 1) as u64);
        assert_eq!(treasury.get_account(&address).unwrap(), account);
    }
}

#[bench]
fn test_accounts_create(bencher: &mut Bencher) {
    let (genesis_block, _) = create_genesis_block(10_000);
    let treasury0 = Treasury::new_with_paths(&genesis_block, Some("bench_a0".to_string()));
    bencher.iter(|| {
        let mut addresses: Vec<BvmAddr> = vec![];
        deposit_many(&treasury0, &mut addresses, 1000);
    });
}

#[bench]
fn test_accounts_squash(bencher: &mut Bencher) {
    let (genesis_block, _) = create_genesis_block(100_000);
    let mut treasuries: Vec<Arc<Treasury>> = Vec::with_capacity(10);
    treasuries.push(Arc::new(Treasury::new_with_paths(
        &genesis_block,
        Some("bench_a1".to_string()),
    )));
    let mut addresses: Vec<BvmAddr> = vec![];
    deposit_many(&treasuries[0], &mut addresses, 250_000);
    treasuries[0].freeze();

    // Measures the performance of the squash operation, which merges the
    // accounts of the parent treasury (holding the majority of the accounts)
    // into this treasury.
    bencher.iter(|| {
        treasuries.push(Arc::new(Treasury::new_from_parent(
            &treasuries[0],
            &BvmAddr::default(),
            1u64,
        )));
        for i in 0..10_000 {
            treasuries[1].deposit(&addresses[i], (i + 1) as u64);
        }
        treasuries[1].squash();
    });
}
34.525424
91
0.646539
f955a8ab4963e53a57cbb3814b9eaec6275fc048
1,625
// Take a look at the license at the top of the repository in the LICENSE file.

use crate::{Overlay, Widget};
use glib::object::Cast;
use glib::signal::{connect_raw, SignalHandlerId};
use glib::translate::*;
use glib::IsA;
use std::mem::transmute;
use std::ptr;

pub trait OverlayExtManual: 'static {
    fn connect_get_child_position<F>(&self, f: F) -> SignalHandlerId
    where
        F: Fn(&Self, &Widget) -> Option<gdk::Rectangle> + 'static;
}

impl<O: IsA<Overlay>> OverlayExtManual for O {
    fn connect_get_child_position<F>(&self, f: F) -> SignalHandlerId
    where
        F: Fn(&Self, &Widget) -> Option<gdk::Rectangle> + 'static,
    {
        unsafe {
            let f: Box<F> = Box::new(f);
            connect_raw(
                self.to_glib_none().0 as *mut _,
                b"get-child-position\0".as_ptr() as *mut _,
                Some(transmute(get_child_position_trampoline::<Self, F> as usize)),
                Box::into_raw(f),
            )
        }
    }
}

unsafe extern "C" fn get_child_position_trampoline<
    T,
    F: Fn(&T, &Widget) -> Option<gdk::Rectangle> + 'static,
>(
    this: *mut ffi::GtkOverlay,
    widget: *mut ffi::GtkWidget,
    allocation: *mut gdk::ffi::GdkRectangle,
    f: glib::ffi::gpointer,
) -> glib::ffi::gboolean
where
    T: IsA<Overlay>,
{
    let f: &F = &*(f as *const F);
    match f(
        &Overlay::from_glib_borrow(this).unsafe_cast_ref(),
        &from_glib_borrow(widget),
    ) {
        Some(rect) => {
            ptr::write(allocation, ptr::read(rect.to_glib_none().0));
            true
        }
        None => false,
    }
    .to_glib()
}
27.542373
83
0.579692
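The connect_raw call above relies on the standard closure-through-C trampoline pattern: box the closure, smuggle it through as user data, and recover it inside an extern "C" function monomorphized over the closure type. A minimal GTK-free sketch of the same idea (all names below are illustrative, not part of gtk-rs):

// Recover the boxed closure from the opaque user-data pointer and call it.
unsafe extern "C" fn trampoline<F: Fn(i32) -> i32>(x: i32, data: *mut std::ffi::c_void) -> i32 {
    let f = &*(data as *const F);
    f(x)
}

fn call_via_c<F: Fn(i32) -> i32>(f: F, x: i32) -> i32 {
    // Box the closure and erase its type into a raw user-data pointer.
    let data = Box::into_raw(Box::new(f)) as *mut std::ffi::c_void;
    // In real bindings the C library stores `data` and calls the trampoline
    // later; here we invoke it directly to keep the sketch self-contained.
    let out = unsafe { trampoline::<F>(x, data) };
    // Reclaim the box; GTK instead frees it when the handler is disconnected.
    drop(unsafe { Box::from_raw(data as *mut F) });
    out
}

fn main() {
    assert_eq!(call_via_c(|x| x + 1, 41), 42);
}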
167ae34fed6aec6f686ffdcb7ed3a7b919697536
1,279
//! Utilities for interacting with refcounted PHP types.

use crate::bindings::{zend_refcounted_h, zend_string};

use super::object::ZendObject;

/// Object used to store Zend reference counter.
pub type ZendRefcount = zend_refcounted_h;

/// Implemented on refcounted types.
pub trait PhpRc {
    /// Returns an immutable reference to the corresponding refcount object.
    fn get_rc(&self) -> &ZendRefcount;

    /// Returns a mutable reference to the corresponding refcount object.
    fn get_rc_mut(&mut self) -> &mut ZendRefcount;

    /// Returns the number of references to the object.
    fn get_count(&self) -> u32 {
        self.get_rc().refcount
    }

    /// Increments the reference counter by 1.
    fn inc_count(&mut self) {
        self.get_rc_mut().refcount += 1
    }

    /// Decrements the reference counter by 1.
    fn dec_count(&mut self) {
        self.get_rc_mut().refcount -= 1;
    }
}

macro_rules! rc {
    ($($t: ty),*) => {
        $(
            impl PhpRc for $t {
                fn get_rc(&self) -> &ZendRefcount {
                    &self.gc
                }

                fn get_rc_mut(&mut self) -> &mut ZendRefcount {
                    &mut self.gc
                }
            }
        )*
    };
}

rc!(ZendObject, zend_string);
25.078431
76
0.583268
e418a63f4ea9e77c718db7c550b2335aa637ba11
4,994
use std::rc::Rc; use crate::{ gl::{ self, Blend, BlendEquation, BlendFactor, BlendFunc, BlendOp, DrawParams, InstancedDrawUnit, }, program, }; use super::super::{Light, OccluderLineVertex}; program! { program ShadowMapProgram params { max_num_lights: u32, } attributes { a_occluder: OccluderLineVertex, i_light: Light, } defines [ pi => std::f32::consts::PI, max_num_lights => max_num_lights, ] vertex glsl! { flat out vec2 v_light_position; flat out float v_light_radius; flat out int v_is_front; out vec4 v_edge; out float v_angle; float angle_to_light(vec2 position) { vec2 delta = position - i_light_position.xy; return atan(delta.y, delta.x); } void main() { if (gl_InstanceID == a_occluder_ignore_light_index1 || gl_InstanceID == a_occluder_ignore_light_index2 || i_light_position.z >= a_occluder_height) { gl_Position = vec4(-10.0, -10.0, -10.0, 1.0); return; } v_light_position = i_light_position.xy; v_light_radius = i_light_radius; vec3 c = cross(vec3(a_occluder_line_0 - i_light_position.xy, 0.0), vec3(a_occluder_line_1 - i_light_position.xy, 0.0)); v_is_front = (((a_occluder_order == 0 || a_occluder_order == 2) && c.z < 0.0) || ((a_occluder_order == 1 || a_occluder_order == 3) && c.z > 0.0)) ? 1 : 0; float angle_0 = angle_to_light(a_occluder_line_0); float angle_1 = angle_to_light(a_occluder_line_1); v_edge = vec4(a_occluder_line_0, a_occluder_line_1); v_edge = mix(v_edge, v_edge.zwxy, step(angle_0, angle_1)); v_angle = angle_0; if (abs(angle_0 - angle_1) > {{pi}}) { if (a_occluder_order == 0) { v_angle = -{{pi}}; } else if (a_occluder_order == 1 || a_occluder_order == 2) { v_angle = min(angle_0, angle_1); } else { v_angle = {{pi}}; } } gl_Position = vec4( v_angle / {{pi}}, (float(gl_InstanceID) + 0.5) / float({{max_num_lights}}) * 2.0 - 1.0, 0.0, 1.0 ); } } fragment glsl! { flat in vec2 v_light_position; flat in float v_light_radius; flat in int v_is_front; in vec4 v_edge; in float v_angle; out vec4 f_color; float ray_line_segment_intersection( vec2 o, vec2 d, vec2 p, vec2 q ) { /** ray(s) = o + d * s (0 <= s) line(t) = p + (q - p) * t (0 <= t <= 1) ray(s) = line(t) <=> o + d * s = p + (q - p) * t <=> d * s + (p - q) * t = p - o <=> M * [[s], [t]] = p - o where M = [[d.x, d.y], [p.x - q.x, p.y - q.y]] <=> [[s], [t]] = M^-1 (p - o) (if M is invertible) **/ float det = d.x * (p.y - q.y) + d.y * (q.x - p.x); if (abs(det) < 0.0000001) return 1.0; mat2 m = mat2(d.x, d.y, p.x - q.x, p.y - q.y); vec2 time = inverse(m) * (p - o); float s = time.x; float t = time.y; if (s >= 0.0 && s <= 1.0 && t >= 0.0 && t <= 1.0) { return s; } else { return 1.0; } } void main() { float t = ray_line_segment_intersection( v_light_position, vec2(cos(v_angle), sin(v_angle)) * v_light_radius, v_edge.xy, v_edge.zw ); f_color = vec4( v_is_front == 0 ? vec2(1.0, t) : vec2(t, 1.0), 0.0, 1.0); } } } pub struct ShadowMapPass { program: ShadowMapProgram, } impl ShadowMapPass { pub fn new(gl: Rc<gl::Context>, max_num_lights: u32) -> Result<Self, gl::Error> { let program = ShadowMapProgram::new(gl, max_num_lights)?; Ok(Self { program }) } pub fn draw(&self, draw_unit: InstancedDrawUnit<(OccluderLineVertex, Light)>) { gl::draw_instanced( &self.program, (), [], draw_unit, &DrawParams { blend: Some(Blend { equation: BlendEquation::same(BlendOp::Min), func: BlendFunc::same(BlendFactor::One, BlendFactor::One), ..Blend::default() }), ..DrawParams::default() }, ) } }
30.638037
99
0.448738
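The fragment shader above derives its ray/segment intersection from a 2x2 linear solve, as its comment block explains. A host-side Rust version of the same math can be handy for testing the shader logic on the CPU; this is a sketch mirroring the GLSL, not part of the crate:

// Returns s in [0, 1] where ray(s) = o + d*s meets segment p..q, else 1.0,
// matching the GLSL ray_line_segment_intersection above.
fn ray_segment(o: [f32; 2], d: [f32; 2], p: [f32; 2], q: [f32; 2]) -> f32 {
    let det = d[0] * (p[1] - q[1]) + d[1] * (q[0] - p[0]);
    if det.abs() < 1e-7 {
        return 1.0;
    }
    // Cramer's rule on M * [s, t]^T = p - o with M = [[d.x, p.x - q.x], [d.y, p.y - q.y]].
    let b = [p[0] - o[0], p[1] - o[1]];
    let s = (b[0] * (p[1] - q[1]) + b[1] * (q[0] - p[0])) / det;
    let t = (d[0] * b[1] - d[1] * b[0]) / det;
    if (0.0..=1.0).contains(&s) && (0.0..=1.0).contains(&t) {
        s
    } else {
        1.0
    }
}

fn main() {
    // A vertical segment at x = 0.5 is hit halfway along a unit ray pointing +x.
    let s = ray_segment([0.0, 0.0], [1.0, 0.0], [0.5, -1.0], [0.5, 1.0]);
    assert!((s - 0.5).abs() < 1e-6);
}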
b935922a0cd4102d28dcc326cf4843d59b08a53a
658
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// run-pass

use std::ops::Add;

extern "C" fn foo<T: Add>(a: T, b: T) -> T::Output {
    a + b
}

fn main() {
    assert_eq!(100u8, foo(0u8, 100u8));
    assert_eq!(100u16, foo(0u16, 100u16));
}
32.9
68
0.691489
fb09bc5d594c89979dd7f8d5ed9abb78a692376f
1,029
use async_channel::unbounded;
use async_dup::Arc;
use easy_parallel::Parallel;
use smol::{future, Async, Executor};
use std::net::TcpListener;
use tophat::server::accept;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let ex = Executor::new();
    let (signal, shutdown) = unbounded::<()>();

    Parallel::new()
        .each(0..num_cpus::get().max(1), |_| {
            future::block_on(ex.run(shutdown.recv()))
        })
        .finish(|| {
            future::block_on(async {
                drop(signal);
            })
        });

    let listener = Async::<TcpListener>::bind(([127, 0, 0, 1], 9999))?;

    smol::block_on(async {
        loop {
            let (stream, _) = listener.accept().await?;
            let stream = Arc::new(stream);

            let task = smol::spawn(async move {
                let serve =
                    accept(stream, |_req, resp_wtr| async { resp_wtr.send().await }).await;

                if let Err(err) = serve {
                    eprintln!("Error: {}", err);
                }
            });

            task.detach();
        }
    })
}
27.810811
99
0.524781
099fa060f8a4a02c383961ba4470aa59da0f21f9
3,510
use encoding::DecoderTrap;
use encoding::all::WINDOWS_31J;
use encoding::types::EncodingRef;

use std::io::{self, Read, Error, ErrorKind};
use std::fs::File;
use std::path::Path;
use std::borrow::Cow;
use std::string::FromUtf8Error;

pub const MIN_ROM_SIZE: usize = 1024;
pub const MAX_ROM_SIZE: usize = 16 * 1024 * 1024;

pub struct Rom {
    bytes: Box<[u8]>,
    bytes_ptr: *mut u8,
}

impl Rom {
    pub fn load<P: AsRef<Path>>(file_name: P) -> io::Result<Rom> {
        let mut file = File::open(file_name)?;
        let mut vec = Vec::new();
        file.read_to_end(&mut vec)?;

        let size = vec.len();
        if size < MIN_ROM_SIZE || size > MAX_ROM_SIZE || size.count_ones() != 1 {
            return Err(Error::new(ErrorKind::InvalidData, "Invalid ROM size"));
        }

        let mut bytes = vec.into_boxed_slice();
        let bytes_ptr = bytes.as_mut_ptr();

        Ok(Rom { bytes, bytes_ptr })
    }

    pub fn size(&self) -> usize {
        self.bytes.len()
    }

    pub fn read_byte(&self, addr: u32) -> u8 {
        let addr = self.mask_addr(addr);
        unsafe { *self.bytes_ptr.offset(addr as _) }
    }

    pub fn read_halfword(&self, addr: u32) -> u16 {
        let addr = addr & 0xfffffffe;
        let addr = self.mask_addr(addr);
        unsafe {
            (*self.bytes_ptr.offset(addr as _) as u16)
                | ((*self.bytes_ptr.offset((addr + 1) as _) as u16) << 8)
        }
    }

    fn mask_addr(&self, addr: u32) -> u32 {
        let mask = (self.bytes.len() - 1) as u32;
        addr & mask
    }

    pub fn name(&self) -> Result<String, Cow<'static, str>> {
        let header_offset = self.header_offset();
        let name_offset = header_offset;
        let name_bytes = &self.bytes[name_offset..name_offset + 0x14];

        // Windows-31J is a superset of Shift JIS, which technically makes this
        // code a bit too permissive, but saves us from writing our own decoder
        // just to read ROM names. Even if we did try to write our own,
        // I haven't seen any documentation that mentions which specific Shift JIS
        // version we should use in the first place, especially since the more
        // widely-used ones were standardized in 1997, after the Virtual Boy was
        // in production.
        let shift_jis_encoding = WINDOWS_31J as EncodingRef;
        shift_jis_encoding.decode(name_bytes, DecoderTrap::Strict)
    }

    pub fn maker_code(&self) -> Result<String, FromUtf8Error> {
        let header_offset = self.header_offset();
        let maker_code_offset = header_offset + 0x19;
        let maker_code_bytes = &self.bytes[maker_code_offset..maker_code_offset + 2];
        let mut maker_code_vec = Vec::new();
        maker_code_vec.extend_from_slice(maker_code_bytes);
        String::from_utf8(maker_code_vec)
    }

    pub fn game_code(&self) -> Result<String, FromUtf8Error> {
        let header_offset = self.header_offset();
        let game_code_offset = header_offset + 0x1b;
        let game_code_bytes = &self.bytes[game_code_offset..game_code_offset + 2];
        let mut game_code_vec = Vec::new();
        game_code_vec.extend_from_slice(game_code_bytes);
        String::from_utf8(game_code_vec)
    }

    pub fn game_version_byte(&self) -> u8 {
        let header_offset = self.header_offset();
        self.bytes[header_offset + 0x1f]
    }

    fn header_offset(&self) -> usize {
        self.size() - 544
    }
}
33.113208
85
0.61339
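Rom::load's `size.count_ones() != 1` check is what makes mask_addr sound: only for a power-of-two length does `addr & (len - 1)` reduce any address into range, giving the usual cartridge-mirroring behavior. A standalone sketch (values illustrative):

fn main() {
    let len: u32 = 0x100;
    assert_eq!(len.count_ones(), 1); // power of two, so masking mirrors cleanly
    let mask = len - 1;
    assert_eq!(0x0000_0012u32 & mask, 0x12);
    assert_eq!(0x0123_4512u32 & mask, 0x12); // out-of-range access wraps onto the ROM
}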
72dc8cb2f0b54ddb1f3bdfeb1e977d7b06b20316
20,979
// Copyright (c) The XPeer Core Contributors // SPDX-License-Identifier: Apache-2.0 use crate::{ account::AccountData, assert_prologue_disparity, assert_prologue_parity, common_transactions::*, compile::{compile_program_with_address, compile_script}, executor::FakeExecutor, }; use assert_matches::assert_matches; use bytecode_verifier::VerifiedModule; use compiler::Compiler; use config::config::{NodeConfigHelpers, VMPublishingOption}; use crypto::signing::KeyPair; use std::collections::HashSet; use tiny_keccak::Keccak; use types::{ account_address::AccountAddress, test_helpers::transaction_test_helpers, transaction::{ TransactionArgument, TransactionStatus, MAX_TRANSACTION_SIZE_IN_BYTES, SCRIPT_HASH_LENGTH, }, vm_error::{ ExecutionStatus, VMStatus, VMValidationStatus, VMVerificationError, VMVerificationStatus, }, }; use vm::gas_schedule; use vm_genesis::encode_transfer_program; #[test] fn verify_signature() { let mut executor = FakeExecutor::from_genesis_file(); let sender = AccountData::new(900_000, 10); executor.add_account_data(&sender); // Generate a new key pair to try and sign things with. let other_keypair = KeyPair::new(::crypto::signing::generate_keypair().0); let program = encode_transfer_program(sender.address(), 100); let signed_txn = transaction_test_helpers::get_test_unchecked_txn( *sender.address(), 0, other_keypair.private_key().clone(), sender.account().pubkey, Some(program), ); assert_prologue_parity!( executor.verify_transaction(signed_txn.clone()), executor.execute_transaction(signed_txn).status(), VMStatus::Validation(VMValidationStatus::InvalidSignature) ); } #[test] fn verify_rejected_write_set() { let mut executor = FakeExecutor::from_genesis_file(); let sender = AccountData::new(900_000, 10); executor.add_account_data(&sender); let signed_txn = transaction_test_helpers::get_write_set_txn( *sender.address(), 0, sender.account().privkey.clone(), sender.account().pubkey, None, ) .into_inner(); assert_prologue_parity!( executor.verify_transaction(signed_txn.clone()), executor.execute_transaction(signed_txn).status(), VMStatus::Validation(VMValidationStatus::RejectedWriteSet) ); } #[test] fn verify_whitelist() { // Making sure the whitelist's hash matches the current compiled script. If this fails, please // try run `cargo run` under vm_genesis and update the vm_config in node.config.toml and in // config.rs in xpeer/config crate. let programs: HashSet<_> = vec![ PEER_TO_PEER.clone(), MINT.clone(), ROTATE_KEY.clone(), CREATE_ACCOUNT.clone(), ] .into_iter() .map(|s| { let mut hash = [0u8; SCRIPT_HASH_LENGTH]; let mut keccak = Keccak::new_sha3_256(); keccak.update(&s); keccak.finalize(&mut hash); hash }) .collect(); let config = NodeConfigHelpers::get_single_node_test_config(false); assert_eq!( Some(&programs), config.vm_config.publishing_options.get_whitelist_set() ) } #[test] fn verify_simple_payment() { // create a FakeExecutor with a genesis from file let mut executor = FakeExecutor::from_genesis_file(); // create and publish a sender with 1_000_000 coins and a receiver with 100_000 coins let sender = AccountData::new(900_000, 10); let receiver = AccountData::new(100_000, 10); executor.add_account_data(&sender); executor.add_account_data(&receiver); // define the arguments to the peer to peer transaction let transfer_amount = 1_000; let mut args: Vec<TransactionArgument> = Vec::new(); args.push(TransactionArgument::Address(*receiver.address())); args.push(TransactionArgument::U64(transfer_amount)); // Create a new transaction that has the exact right sequence number. 
let txn = sender.account().create_signed_txn_with_args( PEER_TO_PEER.clone(), args.clone(), 10, // this should be programmable but for now is 1 more than the setup 10_000, 1, ); assert_eq!(executor.verify_transaction(txn), None); // Create a new transaction that has the bad auth key. let txn = sender.account().create_signed_txn_with_args_and_sender( *receiver.address(), PEER_TO_PEER.clone(), args.clone(), 10, // this should be programmable but for now is 1 more than the setup 10_000, 1, ); assert_prologue_parity!( executor.verify_transaction(txn.clone()), executor.execute_transaction(txn).status(), VMStatus::Validation(VMValidationStatus::InvalidAuthKey) ); // Create a new transaction that has a old sequence number. let txn = sender.account().create_signed_txn_with_args( PEER_TO_PEER.clone(), args.clone(), 1, 10_000, 1, ); assert_prologue_parity!( executor.verify_transaction(txn.clone()), executor.execute_transaction(txn).status(), VMStatus::Validation(VMValidationStatus::SequenceNumberTooOld) ); // Create a new transaction that has a too new sequence number. let txn = sender.account().create_signed_txn_with_args( PEER_TO_PEER.clone(), args.clone(), 11, 10_000, 1, ); assert_prologue_disparity!( executor.verify_transaction(txn.clone()) => None, executor.execute_transaction(txn).status() => TransactionStatus::Discard(VMStatus::Validation( VMValidationStatus::SequenceNumberTooNew )) ); // Create a new transaction that doesn't have enough balance to pay for gas. let txn = sender.account().create_signed_txn_with_args( PEER_TO_PEER.clone(), args.clone(), 10, 1_000_000, 1, ); assert_prologue_parity!( executor.verify_transaction(txn.clone()), executor.execute_transaction(txn).status(), VMStatus::Validation(VMValidationStatus::InsufficientBalanceForTransactionFee) ); // XXX TZ: TransactionExpired // RejectedWriteSet is tested in `verify_rejected_write_set` // InvalidWriteSet is tested in genesis.rs // Create a new transaction from a bogus account that doesn't exist let bogus_account = AccountData::new(100_000, 10); let txn = bogus_account.account().create_signed_txn_with_args( PEER_TO_PEER.clone(), args.clone(), 10, 10_000, 1, ); assert_prologue_parity!( executor.verify_transaction(txn.clone()), executor.execute_transaction(txn).status(), VMStatus::Validation(VMValidationStatus::SendingAccountDoesNotExist(_)) ); // RejectedWriteSet is tested in `verify_rejected_write_set` // InvalidWriteSet is tested in genesis.rs // The next couple tests test transaction size, and bounds on gas price and the number of gas // units that can be submitted with a transaction. // // We test these in the reverse order that they appear in verify_transaction, and build up the // errors one-by-one to make sure that we are both catching all of them, and that we are doing // so in the specified order. let txn = sender.account().create_signed_txn_with_args( PEER_TO_PEER.clone(), args.clone(), 10, 1_000_000, gas_schedule::MAX_PRICE_PER_GAS_UNIT + 1, ); assert_prologue_parity!( executor.verify_transaction(txn.clone()), executor.execute_transaction(txn).status(), VMStatus::Validation(VMValidationStatus::GasUnitPriceAboveMaxBound(_)) ); // Note: We can't test this at the moment since MIN_PRICE_PER_GAS_UNIT is set to 0 for testnet. // Uncomment this test once we have a non-zero MIN_PRICE_PER_GAS_UNIT. 
// let txn = sender.account().create_signed_txn_with_args( // PEER_TO_PEER.clone(), // args.clone(), // 10, // 1_000_000, // gas_schedule::MIN_PRICE_PER_GAS_UNIT - 1, // ); // assert_eq!( // executor.verify_transaction(txn), // Some(VMStatus::Validation( // VMValidationStatus::GasUnitPriceBelowMinBound // )) // ); let txn = sender.account().create_signed_txn_with_args( PEER_TO_PEER.clone(), args.clone(), 10, 1, gas_schedule::MAX_PRICE_PER_GAS_UNIT, ); assert_prologue_parity!( executor.verify_transaction(txn.clone()), executor.execute_transaction(txn).status(), VMStatus::Validation(VMValidationStatus::MaxGasUnitsBelowMinTransactionGasUnits(_)) ); let txn = sender.account().create_signed_txn_with_args( PEER_TO_PEER.clone(), args.clone(), 10, gas_schedule::MIN_TRANSACTION_GAS_UNITS - 1, gas_schedule::MAX_PRICE_PER_GAS_UNIT, ); assert_prologue_parity!( executor.verify_transaction(txn.clone()), executor.execute_transaction(txn).status(), VMStatus::Validation(VMValidationStatus::MaxGasUnitsBelowMinTransactionGasUnits(_)) ); let txn = sender.account().create_signed_txn_with_args( PEER_TO_PEER.clone(), args.clone(), 10, gas_schedule::MAXIMUM_NUMBER_OF_GAS_UNITS + 1, gas_schedule::MAX_PRICE_PER_GAS_UNIT, ); assert_prologue_parity!( executor.verify_transaction(txn.clone()), executor.execute_transaction(txn).status(), VMStatus::Validation(VMValidationStatus::MaxGasUnitsExceedsMaxGasUnitsBound(_)) ); let txn = sender.account().create_signed_txn_with_args( PEER_TO_PEER.clone(), vec![TransactionArgument::U64(42); MAX_TRANSACTION_SIZE_IN_BYTES], 10, gas_schedule::MAXIMUM_NUMBER_OF_GAS_UNITS + 1, gas_schedule::MAX_PRICE_PER_GAS_UNIT, ); assert_prologue_parity!( executor.verify_transaction(txn.clone()), executor.execute_transaction(txn).status(), VMStatus::Validation(VMValidationStatus::ExceededMaxTransactionSize(_)) ); // Create a new transaction that swaps the two arguments. let mut args: Vec<TransactionArgument> = Vec::new(); args.push(TransactionArgument::U64(transfer_amount)); args.push(TransactionArgument::Address(*receiver.address())); let txn = sender .account() .create_signed_txn_with_args(PEER_TO_PEER.clone(), args, 10, 10_000, 1); assert_eq!( executor.verify_transaction(txn), Some(VMStatus::Verification(vec![VMVerificationStatus::Script( VMVerificationError::TypeMismatch("Actual Type Mismatch".to_string()) )])) ); // Create a new transaction that has no argument. 
let txn = sender .account() .create_signed_txn_with_args(PEER_TO_PEER.clone(), vec![], 10, 10_000, 1); assert_eq!( executor.verify_transaction(txn), Some(VMStatus::Verification(vec![VMVerificationStatus::Script( VMVerificationError::TypeMismatch("Actual Type Mismatch".to_string()) )])) ); } #[test] pub fn test_whitelist() { // create a FakeExecutor with a genesis from file let mut executor = FakeExecutor::from_genesis_file(); // create an empty transaction let sender = AccountData::new(1_000_000, 10); executor.add_account_data(&sender); let random_script = compile_script("main() {return;}"); let txn = sender .account() .create_signed_txn_with_args(random_script, vec![], 10, 10_000, 1); assert_prologue_parity!( executor.verify_transaction(txn.clone()), executor.execute_transaction(txn).status(), VMStatus::Validation(VMValidationStatus::UnknownScript) ); } #[test] pub fn test_arbitrary_script_execution() { // create a FakeExecutor with a genesis from file let mut executor = FakeExecutor::from_genesis_with_options(VMPublishingOption::CustomScripts); // create an empty transaction let sender = AccountData::new(1_000_000, 10); executor.add_account_data(&sender); let random_script = compile_script("main() {return;}"); let txn = sender .account() .create_signed_txn_with_args(random_script, vec![], 10, 10_000, 1); assert_eq!(executor.verify_transaction(txn.clone()), None); assert_eq!( executor.execute_transaction(txn).status(), &TransactionStatus::Keep(VMStatus::Execution(ExecutionStatus::Executed)) ); } #[test] pub fn test_no_publishing() { // create a FakeExecutor with a genesis from file let mut executor = FakeExecutor::from_genesis_with_options(VMPublishingOption::CustomScripts); // create a transaction trying to publish a new module. let sender = AccountData::new(1_000_000, 10); let receiver = AccountData::new(100_000, 10); executor.add_account_data(&sender); executor.add_account_data(&receiver); let program = String::from( " modules: module M { public max(a: u64, b: u64): u64 { if (copy(a) > copy(b)) { return copy(a); } else { return copy(b); } return 0; } public sum(a: u64, b: u64): u64 { let c: u64; c = copy(a) + copy(b); return copy(c); } } script: import 0x0.XPeerAccount; main (payee: address, amount: u64) { XPeerAccount.pay_from_sender(move(payee), move(amount)); return; } ", ); let mut args: Vec<TransactionArgument> = Vec::new(); args.push(TransactionArgument::Address(*receiver.address())); args.push(TransactionArgument::U64(100)); let random_script = compile_program_with_address(sender.address(), &program, args); let txn = sender .account() .create_signed_txn_impl(*sender.address(), random_script, 10, 10_000, 1); assert_prologue_parity!( executor.verify_transaction(txn.clone()), executor.execute_transaction(txn).status(), VMStatus::Validation(VMValidationStatus::UnknownModule) ); } #[test] pub fn test_open_publishing_invalid_address() { // create a FakeExecutor with a genesis from file let mut executor = FakeExecutor::from_genesis_with_options(VMPublishingOption::Open); // create a transaction trying to publish a new module. 
let sender = AccountData::new(1_000_000, 10); let receiver = AccountData::new(100_000, 10); executor.add_account_data(&sender); executor.add_account_data(&receiver); let program = String::from( " modules: module M { public max(a: u64, b: u64): u64 { if (copy(a) > copy(b)) { return copy(a); } else { return copy(b); } return 0; } public sum(a: u64, b: u64): u64 { let c: u64; c = copy(a) + copy(b); return copy(c); } } script: import 0x0.XPeerAccount; main (payee: address, amount: u64) { XPeerAccount.pay_from_sender(move(payee), move(amount)); return; } ", ); let mut args: Vec<TransactionArgument> = Vec::new(); args.push(TransactionArgument::Address(*receiver.address())); args.push(TransactionArgument::U64(100)); let random_script = compile_program_with_address(receiver.address(), &program, args); let txn = sender .account() .create_signed_txn_impl(*sender.address(), random_script, 10, 10_000, 1); // verify and fail because the addresses don't match let vm_status = executor.verify_transaction(txn.clone()); let status = match vm_status { Some(VMStatus::Verification(status)) => status, vm_status => panic!("Unexpected verification status: {:?}", vm_status), }; match status.as_slice() { &[VMVerificationStatus::Module( 0, VMVerificationError::ModuleAddressDoesNotMatchSender(_), )] => {} err => panic!("Unexpected verification error: {:?}", err), }; // execute and fail for the same reason let output = executor.execute_transaction(txn); let status = match output.status() { TransactionStatus::Discard(VMStatus::Verification(status)) => status, vm_status => panic!("Unexpected verification status: {:?}", vm_status), }; match status.as_slice() { &[VMVerificationStatus::Module( 0, VMVerificationError::ModuleAddressDoesNotMatchSender(_), )] => {} err => panic!("Unexpected verification error: {:?}", err), }; } #[test] pub fn test_open_publishing() { // create a FakeExecutor with a genesis from file let mut executor = FakeExecutor::from_genesis_with_options(VMPublishingOption::Open); // create a transaction trying to publish a new module. let sender = AccountData::new(1_000_000, 10); let receiver = AccountData::new(100_000, 10); executor.add_account_data(&sender); executor.add_account_data(&receiver); let program = String::from( " modules: module M { public max(a: u64, b: u64): u64 { if (copy(a) > copy(b)) { return copy(a); } else { return copy(b); } return 0; } public sum(a: u64, b: u64): u64 { let c: u64; c = copy(a) + copy(b); return copy(c); } } script: import 0x0.XPeerAccount; main (payee: address, amount: u64) { XPeerAccount.pay_from_sender(move(payee), move(amount)); return; } ", ); let mut args: Vec<TransactionArgument> = Vec::new(); args.push(TransactionArgument::Address(*receiver.address())); args.push(TransactionArgument::U64(100)); let random_script = compile_program_with_address(sender.address(), &program, args); let txn = sender .account() .create_signed_txn_impl(*sender.address(), random_script, 10, 10_000, 1); assert_eq!(executor.verify_transaction(txn.clone()), None); assert_eq!( executor.execute_transaction(txn).status(), &TransactionStatus::Keep(VMStatus::Execution(ExecutionStatus::Executed)) ); } #[test] fn test_dependency_fails_verification() { let mut executor = FakeExecutor::from_genesis_with_options(VMPublishingOption::Open); // Get a module that fails verification into the store. 
let bad_module_code = " modules: module Test { resource R1 { } struct S1 { r1: R#Self.R1 } public new_S1(): V#Self.S1 { let s: V#Self.S1; let r: R#Self.R1; r = R1 {}; s = S1 { r1: move(r) }; return move(s); } } script: main() { } "; let compiler = Compiler { code: bad_module_code, ..Compiler::default() }; let mut modules = compiler .into_compiled_program() .expect("Failed to compile") .modules; let module = modules.swap_remove(0); executor.add_module(&module.self_id(), &module); // Create a transaction that tries to use that module. let sender = AccountData::new(1_000_000, 10); executor.add_account_data(&sender); let code = " import 0x0.Test; main() { let x: V#Test.S1; x = Test.new_S1(); return; } "; let compiler = Compiler { code, address: *sender.address(), // This is OK because we *know* the module is unverified. extra_deps: vec![VerifiedModule::bypass_verifier_DANGEROUS_FOR_TESTING_ONLY( module, )], ..Compiler::default() }; let program = compiler.into_program(vec![]).expect("Failed to compile"); let txn = sender .account() .create_signed_txn_impl(*sender.address(), program, 10, 10_000, 1); // As of now, we don't verify dependencies in verify_transaction. assert_eq!(executor.verify_transaction(txn.clone()), None); let errors = match executor.execute_transaction(txn).status() { TransactionStatus::Discard(VMStatus::Verification(errors)) => errors.to_vec(), other => panic!("Unexpected status: {:?}", other), }; assert_matches!( &errors[0], VMVerificationStatus::Dependency(module_id, _) if module_id.address() == &AccountAddress::default() && module_id.name() == "Test" ); }
33.19462
99
0.633205
11edb2e1845b560596db3449e76f9642c347cd6e
1,164
use serde::{Deserialize, Serialize};

use crate::{
    requests::{dynamic, json, Method},
    types::True,
};

/// Use this method to delete a sticker from a set created by the bot. Returns True on success.
#[serde_with_macros::skip_serializing_none]
#[derive(Debug, PartialEq, Eq, Hash, Clone, Deserialize, Serialize)]
pub struct DeleteStickerFromSet {
    /// File identifier of the sticker
    sticker: String,
}

impl Method for DeleteStickerFromSet {
    type Output = True;

    const NAME: &'static str = "deleteStickerFromSet";
}

impl json::Payload for DeleteStickerFromSet {}

impl dynamic::Payload for DeleteStickerFromSet {
    fn kind(&self) -> dynamic::Kind {
        dynamic::Kind::Json(serde_json::to_string(self).unwrap())
    }
}

impl DeleteStickerFromSet {
    pub fn new<S>(sticker: S) -> Self
    where
        S: Into<String>,
    {
        let sticker = sticker.into();
        Self { sticker }
    }
}

impl json::Request<'_, DeleteStickerFromSet> {
    pub fn sticker<T>(mut self, val: T) -> Self
    where
        T: Into<String>,
    {
        self.payload.sticker = val.into();
        self
    }
}
22.823529
95
0.627148
76fe0eab0bf51ada4f477d197e1edbc41afe404b
10,199
#[doc = "Register `DOUT_MODE` reader"] pub struct R(crate::R<DOUT_MODE_SPEC>); impl core::ops::Deref for R { type Target = crate::R<DOUT_MODE_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<DOUT_MODE_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<DOUT_MODE_SPEC>) -> Self { R(reader) } } #[doc = "Register `DOUT_MODE` writer"] pub struct W(crate::W<DOUT_MODE_SPEC>); impl core::ops::Deref for W { type Target = crate::W<DOUT_MODE_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<crate::W<DOUT_MODE_SPEC>> for W { #[inline(always)] fn from(writer: crate::W<DOUT_MODE_SPEC>) -> Self { W(writer) } } #[doc = "Field `DOUT0_MODE` reader - The output signal 0 is delayed by the SPI module clock, 0: output without delayed, 1: output delay for a SPI module clock cycle at its negative edge. Can be configured in CONF state."] pub struct DOUT0_MODE_R(crate::FieldReader<bool, bool>); impl DOUT0_MODE_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { DOUT0_MODE_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for DOUT0_MODE_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `DOUT0_MODE` writer - The output signal 0 is delayed by the SPI module clock, 0: output without delayed, 1: output delay for a SPI module clock cycle at its negative edge. Can be configured in CONF state."] pub struct DOUT0_MODE_W<'a> { w: &'a mut W, } impl<'a> DOUT0_MODE_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | (value as u32 & 0x01); self.w } } #[doc = "Field `DOUT1_MODE` reader - The output signal 1 is delayed by the SPI module clock, 0: output without delayed, 1: output delay for a SPI module clock cycle at its negative edge. Can be configured in CONF state."] pub struct DOUT1_MODE_R(crate::FieldReader<bool, bool>); impl DOUT1_MODE_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { DOUT1_MODE_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for DOUT1_MODE_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `DOUT1_MODE` writer - The output signal 1 is delayed by the SPI module clock, 0: output without delayed, 1: output delay for a SPI module clock cycle at its negative edge. Can be configured in CONF state."] pub struct DOUT1_MODE_W<'a> { w: &'a mut W, } impl<'a> DOUT1_MODE_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 1)) | ((value as u32 & 0x01) << 1); self.w } } #[doc = "Field `DOUT2_MODE` reader - The output signal 2 is delayed by the SPI module clock, 0: output without delayed, 1: output delay for a SPI module clock cycle at its negative edge. 
Can be configured in CONF state."] pub struct DOUT2_MODE_R(crate::FieldReader<bool, bool>); impl DOUT2_MODE_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { DOUT2_MODE_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for DOUT2_MODE_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `DOUT2_MODE` writer - The output signal 2 is delayed by the SPI module clock, 0: output without delayed, 1: output delay for a SPI module clock cycle at its negative edge. Can be configured in CONF state."] pub struct DOUT2_MODE_W<'a> { w: &'a mut W, } impl<'a> DOUT2_MODE_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 2)) | ((value as u32 & 0x01) << 2); self.w } } #[doc = "Field `DOUT3_MODE` reader - The output signal 3 is delayed by the SPI module clock, 0: output without delayed, 1: output delay for a SPI module clock cycle at its negative edge. Can be configured in CONF state."] pub struct DOUT3_MODE_R(crate::FieldReader<bool, bool>); impl DOUT3_MODE_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { DOUT3_MODE_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for DOUT3_MODE_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `DOUT3_MODE` writer - The output signal 3 is delayed by the SPI module clock, 0: output without delayed, 1: output delay for a SPI module clock cycle at its negative edge. Can be configured in CONF state."] pub struct DOUT3_MODE_W<'a> { w: &'a mut W, } impl<'a> DOUT3_MODE_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 3)) | ((value as u32 & 0x01) << 3); self.w } } impl R { #[doc = "Bit 0 - The output signal 0 is delayed by the SPI module clock, 0: output without delayed, 1: output delay for a SPI module clock cycle at its negative edge. Can be configured in CONF state."] #[inline(always)] pub fn dout0_mode(&self) -> DOUT0_MODE_R { DOUT0_MODE_R::new((self.bits & 0x01) != 0) } #[doc = "Bit 1 - The output signal 1 is delayed by the SPI module clock, 0: output without delayed, 1: output delay for a SPI module clock cycle at its negative edge. Can be configured in CONF state."] #[inline(always)] pub fn dout1_mode(&self) -> DOUT1_MODE_R { DOUT1_MODE_R::new(((self.bits >> 1) & 0x01) != 0) } #[doc = "Bit 2 - The output signal 2 is delayed by the SPI module clock, 0: output without delayed, 1: output delay for a SPI module clock cycle at its negative edge. Can be configured in CONF state."] #[inline(always)] pub fn dout2_mode(&self) -> DOUT2_MODE_R { DOUT2_MODE_R::new(((self.bits >> 2) & 0x01) != 0) } #[doc = "Bit 3 - The output signal 3 is delayed by the SPI module clock, 0: output without delayed, 1: output delay for a SPI module clock cycle at its negative edge. 
Can be configured in CONF state."] #[inline(always)] pub fn dout3_mode(&self) -> DOUT3_MODE_R { DOUT3_MODE_R::new(((self.bits >> 3) & 0x01) != 0) } } impl W { #[doc = "Bit 0 - The output signal 0 is delayed by the SPI module clock, 0: output without delayed, 1: output delay for a SPI module clock cycle at its negative edge. Can be configured in CONF state."] #[inline(always)] pub fn dout0_mode(&mut self) -> DOUT0_MODE_W { DOUT0_MODE_W { w: self } } #[doc = "Bit 1 - The output signal 1 is delayed by the SPI module clock, 0: output without delayed, 1: output delay for a SPI module clock cycle at its negative edge. Can be configured in CONF state."] #[inline(always)] pub fn dout1_mode(&mut self) -> DOUT1_MODE_W { DOUT1_MODE_W { w: self } } #[doc = "Bit 2 - The output signal 2 is delayed by the SPI module clock, 0: output without delayed, 1: output delay for a SPI module clock cycle at its negative edge. Can be configured in CONF state."] #[inline(always)] pub fn dout2_mode(&mut self) -> DOUT2_MODE_W { DOUT2_MODE_W { w: self } } #[doc = "Bit 3 - The output signal 3 is delayed by the SPI module clock, 0: output without delayed, 1: output delay for a SPI module clock cycle at its negative edge. Can be configured in CONF state."] #[inline(always)] pub fn dout3_mode(&mut self) -> DOUT3_MODE_W { DOUT3_MODE_W { w: self } } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "SPI output delay mode configuration\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [dout_mode](index.html) module"] pub struct DOUT_MODE_SPEC; impl crate::RegisterSpec for DOUT_MODE_SPEC { type Ux = u32; } #[doc = "`read()` method returns [dout_mode::R](R) reader structure"] impl crate::Readable for DOUT_MODE_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [dout_mode::W](W) writer structure"] impl crate::Writable for DOUT_MODE_SPEC { type Writer = W; } #[doc = "`reset()` method sets DOUT_MODE to value 0"] impl crate::Resettable for DOUT_MODE_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
39.996078
425
0.633101
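Every generated field writer in the register block above reduces to the same read-modify-write on a single bit: clear the target bit, then OR in the new value shifted into place. Distilled into a standalone sketch (not part of the generated API):

fn write_bit(reg: u32, n: u32, value: bool) -> u32 {
    // Clear bit n, then set it from `value`.
    (reg & !(1 << n)) | ((value as u32) << n)
}

fn main() {
    assert_eq!(write_bit(0b0000, 1, true), 0b0010);
    assert_eq!(write_bit(0b1111, 2, false), 0b1011);
}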
e8b86e07b60a80706bf82c31addd70834fec2860
3,332
use crate::interpreter::environment::EnvironmentId;
use crate::interpreter::error::Error;
use crate::interpreter::interpreter::Interpreter;
use crate::interpreter::value::Value;

use crate::interpreter::library;

pub fn equal_question(
    interpreter: &mut Interpreter,
    _environment: EnvironmentId,
    values: Vec<Value>,
) -> Result<Value, Error> {
    if values.len() != 2 {
        return Error::invalid_argument_count_error(
            "Built-in function `equal?' must take exactly two arguments.",
        )
        .into();
    }

    let mut values = values;

    let value1 = values.remove(0);
    let value2 = values.remove(0);

    let result = library::deep_equal(interpreter, value1, value2)?;

    Ok(Value::Boolean(result))
}

#[cfg(test)]
mod tests {
    use super::*;

    #[allow(unused_imports)]
    use nia_basic_assertions::*;

    #[allow(unused_imports)]
    use crate::utils;

    #[test]
    fn returns_correct_comparison_results_for_equal_values() {
        let mut interpreter = Interpreter::new();

        let pairs = vec![
            ("(equal? 1 1)", "#t"),
            ("(equal? 1.1 1.1)", "#t"),
            ("(equal? #t #t)", "#t"),
            ("(equal? #f #f)", "#t"),
            ("(equal? \"string\" \"string\")", "#t"),
            ("(equal? 'symbol 'symbol)", "#t"),
            ("(equal? :keyword :keyword)", "#t"),
            ("(equal? {:a 1} {:a 1})", "#t"),
            ("(equal? '(1 2) '(1 2))", "#t"),
            ("(equal? #(+ %1 %2) #(+ %1 %2))", "#t"),
        ];

        utils::assert_results_are_equal(&mut interpreter, pairs);
    }

    #[test]
    fn returns_correct_comparison_results_for_not_equal_values() {
        let mut interpreter = Interpreter::new();

        let pairs = vec![
            ("(equal? 1 2)", "#f"),
            ("(equal? 1.1 1.2)", "#f"),
            ("(equal? #t #f)", "#f"),
            ("(equal? #f #t)", "#f"),
            ("(equal? \"string-1\" \"string-2\")", "#f"),
            ("(equal? 'symbol-1 'symbol-2)", "#f"),
            ("(equal? :keyword-1 :keyword-2)", "#f"),
            ("(equal? {:a 1} {:a 2})", "#f"),
            ("(equal? '(1 2) '(1 3))", "#f"),
            ("(equal? #(+ %1 %2) #(+ %1 %3))", "#f"),
        ];

        utils::assert_results_are_equal(&mut interpreter, pairs);
    }

    #[test]
    fn returns_false_for_values_of_different_types() {
        let mut interpreter = Interpreter::new();

        let pairs = vec![
            ("(equal? 1 2.2)", "#f"),
            ("(equal? 1.1 1)", "#f"),
            ("(equal? #t \"string\")", "#f"),
            ("(equal? #f 'symbol)", "#f"),
            ("(equal? \"string-1\" :keyword)", "#f"),
            ("(equal? 'symbol-1 2)", "#f"),
            ("(equal? :keyword-1 1)", "#f"),
            ("(equal? {:a 1} 4)", "#f"),
            ("(equal? '(1 2) 5)", "#f"),
            ("(equal? #(+ %1 %2) 5)", "#f"),
        ];

        utils::assert_results_are_equal(&mut interpreter, pairs);
    }

    #[test]
    fn returns_invalid_argument_count_error_when_incorrect_count_of_arguments_were_provided() {
        let mut interpreter = Interpreter::new();

        let code_vector = vec!["(equal?)", "(equal? 1)", "(equal? 1 2 3)"];

        utils::assert_results_are_invalid_argument_count_errors(
            &mut interpreter,
            code_vector,
        );
    }
}
29.486726
84
0.487995
c15548decb2eefaf3d121837395b7ea93cff3b7e
7,274
use crate::error::*;
use crate::*;

pub type EditorPkgInstaller = Installer<UnityEditor, Pkg, InstallerWithDestination>;
pub type ModulePkgNativeInstaller = Installer<UnityModule, Pkg, BaseInstaller>;
pub type ModulePkgInstaller = Installer<UnityModule, Pkg, InstallerWithDestination>;

impl<V, I> Installer<V, Pkg, I> {
    fn move_files<P: AsRef<Path>, D: AsRef<Path>>(&self, source: P, destination: D) -> Result<()> {
        let source = source.as_ref();
        let destination = destination.as_ref();
        debug!(
            "move all files from {} into {}",
            source.display(),
            destination.display()
        );

        for entry in fs::read_dir(&source)?.filter_map(io::Result::ok) {
            let new_location = destination.join(entry.file_name());
            debug!(
                "move {} to {}",
                entry.path().display(),
                new_location.display()
            );
            if new_location.exists() && new_location.is_dir() {
                warn!(
                    "target directory already exists. {}",
                    new_location.display()
                );
                warn!("delete directory: {}", new_location.display());
                fs::remove_dir_all(&new_location)?;
            }

            fs::rename(entry.path(), &new_location)?;
        }
        Ok(())
    }

    fn xar<P: AsRef<Path>, D: AsRef<Path>>(&self, installer: P, destination: D) -> Result<()> {
        let installer = installer.as_ref();
        let destination = destination.as_ref();
        debug!(
            "unpack installer {} to temp destination {}",
            installer.display(),
            destination.display()
        );
        let child = Command::new("xar")
            .arg("-x")
            .arg("-f")
            .arg(installer)
            .arg("-C")
            .arg(destination)
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .spawn()?;

        let output = child.wait_with_output()?;
        if !output.status.success() {
            return Err(format!(
                "failed to extract installer from pkg package:\n{}",
                String::from_utf8_lossy(&output.stderr)
            )
            .into());
        }
        Ok(())
    }

    fn untar<P: AsRef<Path>, D: AsRef<Path>>(
        &self,
        base_payload_path: P,
        destination: D,
    ) -> Result<()> {
        let base_payload_path = base_payload_path.as_ref();
        let payload = self.find_payload(&base_payload_path)?;
        debug!("untar payload at {}", payload.display());
        self.tar(&payload, destination)
    }

    fn tar<P: AsRef<Path>, D: AsRef<Path>>(&self, source: P, destination: D) -> Result<()> {
        let source = source.as_ref();
        let destination = destination.as_ref();

        let tar_child = Command::new("tar")
            .arg("-C")
            .arg(destination)
            .arg("-zmxf")
            .arg(source)
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .spawn()?;

        let tar_output = tar_child.wait_with_output()?;
        if !tar_output.status.success() {
            return Err(format!(
                "failed to untar payload:\n{}",
                String::from_utf8_lossy(&tar_output.stderr)
            )
            .into());
        }

        Ok(())
    }
}

impl EditorPkgInstaller {
    fn cleanup_editor<D: AsRef<Path>>(&self, destination: D) -> Result<()> {
        use std::fs;
        let destination = destination.as_ref();
        let tmp_unity_directory = destination.join("Unity");
        if !tmp_unity_directory.exists() {
            return Err("error extracting installer".into());
        }

        self.move_files(&tmp_unity_directory, &destination)?;
        fs::remove_dir_all(&tmp_unity_directory)?;
        Ok(())
    }
}

impl ModulePkgInstaller {
    fn cleanup_ios_support<D: AsRef<Path>>(&self, destination: D) -> Result<()> {
        use std::fs;
        let destination = destination.as_ref();
        debug!("cleanup ios support package at {}", destination.display());

        let tmp_ios_support_directory = destination.join("iOSSupport");
        if tmp_ios_support_directory.exists() {
            debug!(
                "move ios files from {} to {}",
                tmp_ios_support_directory.display(),
                destination.display()
            );
            self.move_files(&tmp_ios_support_directory, &destination)?;
            fs::remove_dir_all(&tmp_ios_support_directory)?;
        }
        Ok(())
    }
}

impl InstallHandler for EditorPkgInstaller {
    fn install_handler(&self) -> Result<()> {
        let destination = self.destination();
        let installer = self.installer();

        debug!(
            "install editor from pkg {} to {}",
            installer.display(),
            destination.display()
        );
        let tmp_destination = destination.join("tmp");
        DirBuilder::new().recursive(true).create(&tmp_destination)?;
        self.xar(installer, &tmp_destination)?;
        self.untar(&tmp_destination, destination)?;
        self.cleanup_editor(destination)?;
        self.cleanup(&tmp_destination)?;
        Ok(())
    }

    fn error_handler(&self) {
        self.cleanup_directory_failable(&self.destination());
    }
}

impl InstallHandler for ModulePkgInstaller {
    fn install_handler(&self) -> Result<()> {
        let destination = self.destination();
        let installer = self.installer();

        debug!(
            "install module from pkg {} to {}",
            installer.display(),
            destination.display()
        );
        let tmp_destination = destination.join("tmp");
        DirBuilder::new().recursive(true).create(&tmp_destination)?;
        self.xar(installer, &tmp_destination)?;
        self.untar(&tmp_destination, destination)?;
        self.cleanup_ios_support(destination)?;
        self.cleanup(&tmp_destination)?;
        Ok(())
    }

    fn after_install(&self) -> Result<()> {
        if let Some((from, to)) = &self.rename() {
            uvm_move_dir::move_dir(from, to).chain_err(|| "failed to rename installed module")?;
        }
        Ok(())
    }

    fn error_handler(&self) {
        self.cleanup_directory_failable(&self.destination());
    }
}

impl InstallHandler for ModulePkgNativeInstaller {
    fn install_handler(&self) -> Result<()> {
        let installer = self.installer();
        debug!("install from pkg {}", installer.display());

        let child = Command::new("sudo")
            .arg("installer")
            .arg("-package")
            .arg(installer)
            .arg("-target")
            .arg("/")
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .spawn()?;

        let output = child.wait_with_output()?;
        if !output.status.success() {
            return Err(format!(
                "failed to install {}\n{}",
                installer.display(),
                String::from_utf8_lossy(&output.stderr)
            )
            .into());
        }
        Ok(())
    }

    fn after_install(&self) -> Result<()> {
        if let Some((from, to)) = &self.rename() {
            uvm_move_dir::move_dir(from, to).chain_err(|| "failed to rename installed module")?;
        }
        Ok(())
    }
}
31.903509
111
0.542755
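macOS `.pkg` files are xar archives whose payload member is a gzipped tar, which is why the installer above shells out to `xar` first and `tar` second. A minimal standalone sketch of that two-step pattern, assuming `xar` and `tar` are on PATH; the paths and the fixed `Payload` location are hypothetical (the real code locates the payload via `find_payload`):

use std::process::Command;

// Sketch: extract a macOS pkg by unpacking the xar wrapper, then untarring
// the gzipped payload. Paths are hypothetical placeholders.
fn extract_pkg(pkg: &str, tmp: &str, dest: &str) -> Result<(), String> {
    let out = Command::new("xar")
        .args(["-x", "-f", pkg, "-C", tmp])
        .output()
        .map_err(|e| e.to_string())?;
    if !out.status.success() {
        return Err(format!("xar failed:\n{}", String::from_utf8_lossy(&out.stderr)));
    }
    // -z gunzip, -m don't restore mtimes, -x extract, -f archive file
    let payload = format!("{}/Payload", tmp); // location varies between pkgs
    let out = Command::new("tar")
        .args(["-C", dest, "-zmxf", &payload])
        .output()
        .map_err(|e| e.to_string())?;
    if !out.status.success() {
        return Err(format!("tar failed:\n{}", String::from_utf8_lossy(&out.stderr)));
    }
    Ok(())
}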
14c98bd80f9c1221474014b99531963a07c26223
896
#![feature(proc_macro_hygiene, decl_macro)] #[macro_use] extern crate rocket; use beeline::Config; use beeline_rocket::BeelineMiddleware; #[get("/")] fn index() -> &'static str { "Hello, world!" } #[post("/")] fn index_post() -> &'static str { "Hello, world through a post!" } fn main() { let mut config = Config::default(); if let Some(api_key) = option_env!("HONEYCOMB_API_KEY") { config.client_config.options.api_key = api_key.to_string(); } if let Some(dataset) = option_env!("HONEYCOMB_DATASET") { config.client_config.options.dataset = dataset.to_string(); } config.service_name = Some("beeline-rocket-simple".to_string()); let client = beeline::init(config); let middleware = BeelineMiddleware::new(client); rocket::ignite() .attach(middleware) .mount("/", routes![index, index_post]) .launch(); }
24.216216
68
0.643973
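One subtlety in the example above: `option_env!` is expanded at compile time, so `HONEYCOMB_API_KEY` and `HONEYCOMB_DATASET` must be set in the environment of the build, not of the running binary. If runtime configuration is preferred, `std::env::var` is the usual substitute; a sketch assuming the same `beeline::Config` field layout as above, with `apply_env` as a hypothetical helper name:

use std::env;

// Runtime equivalent of the compile-time `option_env!` lookups above.
fn apply_env(config: &mut beeline::Config) {
    if let Ok(api_key) = env::var("HONEYCOMB_API_KEY") {
        config.client_config.options.api_key = api_key;
    }
    if let Ok(dataset) = env::var("HONEYCOMB_DATASET") {
        config.client_config.options.dataset = dataset;
    }
}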
bba330c28bbb561373023cb3746084720513cfeb
1,035
extern crate num; use std::iter::Product; use num::{PrimInt, Unsigned}; /// Find the factorial of n fn factorial<T>(n: T) -> T where T: PrimInt + Unsigned { num::range(T::one(), n + T::one()).product() } #[cfg(test)] mod tests { #[allow(unused_imports)] use super::*; fn test_factorial_types(n: u8, expected: u8) { assert_eq!(factorial::<u8>(n as u8), expected as u8); assert_eq!(factorial::<u16>(n as u16), expected as u16); assert_eq!(factorial::<u32>(n as u32), expected as u32); assert_eq!(factorial::<u64>(n as u64), expected as u64); } #[test] fn test_factorial() { test_factorial_types(0, 1); test_factorial_types(1, 1); test_factorial_types(2, 2); test_factorial_types(3, 6); test_factorial_types(4, 24); } } fn main() { println!("u8: 3! = {}", factorial(3_u8)); println!("u16: 3! = {}", factorial(3_u16)); println!("u32: 3! = {}", factorial(3_u32)); println!("u64: 3! = {}", factorial(3_u64)); }
25.243902
64
0.57971
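The tests above stop at 4! for a reason: the product overflows small integer types quickly (6! = 720 already exceeds u8::MAX), and `n + T::one()` in the range bound can itself wrap at the type's maximum value. A sketch of a checked variant that returns `None` on overflow, assuming the same `num` dependency; `checked_factorial` is a hypothetical name:

use num::{PrimInt, Unsigned};

/// Factorial that returns `None` instead of overflowing.
/// `PrimInt` already requires `CheckedMul`, so no extra bounds are needed.
fn checked_factorial<T: PrimInt + Unsigned>(n: T) -> Option<T> {
    let mut acc = T::one();
    let mut i = T::one();
    while i < n {
        i = i + T::one();
        acc = acc.checked_mul(&i)?;
    }
    Some(acc)
}

fn main() {
    assert_eq!(checked_factorial(5_u8), Some(120));
    assert_eq!(checked_factorial(6_u8), None); // 720 > u8::MAX
}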
1a9e20e79fe1ed0ed9a455138d7d77a145c257c9
8,707
use crate::infer::free_regions::FreeRegionMap; use crate::infer::{GenericKind, InferCtxt}; use crate::traits::query::OutlivesBound; use rustc_data_structures::fx::FxHashMap; use rustc_hir as hir; use rustc_middle::ty; use super::explicit_outlives_bounds; /// The `OutlivesEnvironment` collects information about what outlives /// what in a given type-checking setting. For example, if we have a /// where-clause like `where T: 'a` in scope, then the /// `OutlivesEnvironment` would record that (in its /// `region_bound_pairs` field). Similarly, it contains methods for /// processing and adding implied bounds into the outlives /// environment. /// /// Other code at present does not typically take a /// `&OutlivesEnvironment`, but rather takes some of its fields (e.g., /// `process_registered_region_obligations` wants the /// region-bound-pairs). There is no mistaking it: the current setup /// of tracking region information is quite scattered! The /// `OutlivesEnvironment`, for example, needs to sometimes be combined /// with the `middle::RegionRelations`, to yield a full picture of how /// (lexical) lifetimes interact. However, I'm reluctant to do more /// refactoring here, since the setup with NLL is quite different. /// For example, NLL has no need of `RegionRelations`, and is solely /// interested in the `OutlivesEnvironment`. -nmatsakis #[derive(Clone)] pub struct OutlivesEnvironment<'tcx> { pub param_env: ty::ParamEnv<'tcx>, free_region_map: FreeRegionMap<'tcx>, // Contains, for each body B that we are checking (that is, the fn // item, but also any nested closures), the set of implied region // bounds that are in scope in that particular body. // // Example: // // ``` // fn foo<'a, 'b, T>(x: &'a T, y: &'b ()) { // bar(x, y, |y: &'b T| { .. } // body B1) // } // body B0 // ``` // // Here, for body B0, the list would be `[T: 'a]`, because we // infer that `T` must outlive `'a` from the implied bounds on the // fn declaration. // // For the body B1, the list would be `[T: 'a, T: 'b]`, because we // also can see that -- within the closure body! -- `T` must // outlive `'b`. This is not necessarily true outside the closure // body, since the closure may never be called. // // We collect this map as we descend the tree. We then use the // results when proving outlives obligations like `T: 'x` later // (e.g., if `T: 'x` must be proven within the body B1, then we // know it is true if either `'a: 'x` or `'b: 'x`). region_bound_pairs_map: FxHashMap<hir::HirId, RegionBoundPairs<'tcx>>, // Used to compute `region_bound_pairs_map`: contains the set of // in-scope region-bound pairs thus far. region_bound_pairs_accum: RegionBoundPairs<'tcx>, } /// "Region-bound pairs" tracks outlives relations that are known to /// be true, either because of explicit where-clauses like `T: 'a` or /// because of implied bounds. pub type RegionBoundPairs<'tcx> = Vec<(ty::Region<'tcx>, GenericKind<'tcx>)>; impl<'a, 'tcx> OutlivesEnvironment<'tcx> { pub fn new(param_env: ty::ParamEnv<'tcx>) -> Self { let mut env = OutlivesEnvironment { param_env, free_region_map: Default::default(), region_bound_pairs_map: Default::default(), region_bound_pairs_accum: vec![], }; env.add_outlives_bounds(None, explicit_outlives_bounds(param_env)); env } /// Borrows current value of the `free_region_map`. pub fn free_region_map(&self) -> &FreeRegionMap<'tcx> { &self.free_region_map } /// Borrows current value of the `region_bound_pairs`. 
pub fn region_bound_pairs_map(&self) -> &FxHashMap<hir::HirId, RegionBoundPairs<'tcx>> { &self.region_bound_pairs_map } /// Returns ownership of the `free_region_map`. pub fn into_free_region_map(self) -> FreeRegionMap<'tcx> { self.free_region_map } /// This is a hack to support the old-skool regionck, which /// processes region constraints from the main function and the /// closure together. In that context, when we enter a closure, we /// want to be able to "save" the state of the surrounding a /// function. We can then add implied bounds and the like from the /// closure arguments into the environment -- these should only /// apply in the closure body, so once we exit, we invoke /// `pop_snapshot_post_closure` to remove them. /// /// Example: /// /// ``` /// fn foo<T>() { /// callback(for<'a> |x: &'a T| { /// // ^^^^^^^ not legal syntax, but probably should be /// // within this closure body, `T: 'a` holds /// }) /// } /// ``` /// /// This "containment" of closure's effects only works so well. In /// particular, we (intentionally) leak relationships between free /// regions that are created by the closure's bounds. The case /// where this is useful is when you have (e.g.) a closure with a /// signature like `for<'a, 'b> fn(x: &'a &'b u32)` -- in this /// case, we want to keep the relationship `'b: 'a` in the /// free-region-map, so that later if we have to take `LUB('b, /// 'a)` we can get the result `'b`. /// /// I have opted to keep **all modifications** to the /// free-region-map, however, and not just those that concern free /// variables bound in the closure. The latter seems more correct, /// but it is not the existing behavior, and I could not find a /// case where the existing behavior went wrong. In any case, it /// seems like it'd be readily fixed if we wanted. There are /// similar leaks around givens that seem equally suspicious, to /// be honest. --nmatsakis pub fn push_snapshot_pre_closure(&self) -> usize { self.region_bound_pairs_accum.len() } /// See `push_snapshot_pre_closure`. pub fn pop_snapshot_post_closure(&mut self, len: usize) { self.region_bound_pairs_accum.truncate(len); } /// Save the current set of region-bound pairs under the given `body_id`. pub fn save_implied_bounds(&mut self, body_id: hir::HirId) { let old = self.region_bound_pairs_map.insert(body_id, self.region_bound_pairs_accum.clone()); assert!(old.is_none()); } /// Processes outlives bounds that are known to hold, whether from implied or other sources. /// /// The `infcx` parameter is optional; if the implied bounds may /// contain inference variables, it must be supplied, in which /// case we will register "givens" on the inference context. (See /// `RegionConstraintData`.) pub fn add_outlives_bounds<I>( &mut self, infcx: Option<&InferCtxt<'a, 'tcx>>, outlives_bounds: I, ) where I: IntoIterator<Item = OutlivesBound<'tcx>>, { // Record relationships such as `T:'x` that don't go into the // free-region-map but which we use here. 
for outlives_bound in outlives_bounds { debug!("add_outlives_bounds: outlives_bound={:?}", outlives_bound); match outlives_bound { OutlivesBound::RegionSubRegion( r_a @ (&ty::ReEarlyBound(_) | &ty::ReFree(_)), &ty::ReVar(vid_b), ) => { infcx.expect("no infcx provided but region vars found").add_given(r_a, vid_b); } OutlivesBound::RegionSubParam(r_a, param_b) => { self.region_bound_pairs_accum.push((r_a, GenericKind::Param(param_b))); } OutlivesBound::RegionSubProjection(r_a, projection_b) => { self.region_bound_pairs_accum .push((r_a, GenericKind::Projection(projection_b))); } OutlivesBound::RegionSubRegion(r_a, r_b) => { // In principle, we could record (and take // advantage of) every relationship here, but // we are also free not to -- it simply means // strictly less than we can successfully type // check. Right now we only look for // relationships between free regions. (It may // also be that we should revise our inference // system to be more general and to make use // of *every* relationship that arises here, // but presently we do not.) self.free_region_map.relate_regions(r_a, r_b); } } } } }
43.318408
98
0.623636
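Outside the compiler, the implied bounds that `add_outlives_bounds` accumulates are visible in everyday Rust: a parameter of type `&'a T` puts `T: 'a` in scope for the function body without any written where-clause. A small self-contained illustration of that effect (plain Rust, not compiler-internal code):

// `&'a T` in the signature implies `T: 'a` for the body; this is the kind of
// (region, generic) pair that ends up in `region_bound_pairs_accum`.
fn implied<'a, T>(x: &'a T) -> &'a T {
    // No explicit `where T: 'a` is needed: the bound is implied by the
    // argument type.
    x
}

fn main() {
    let s = String::from("hi");
    println!("{}", implied(&s));
}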
48a81730a4aedc1388f5ae90ad29e8047400ff7d
143
pub mod frame; pub mod render; pub mod player; pub mod shot; pub mod invaders; pub const NUM_ROWS: usize = 20; pub const NUM_COLS: usize = 40;
17.875
31
0.734266
39c0a226006effc40cde19181b3ffec70fd08fad
31,114
use crate::pb::{FlatUnixFs, PBLink, UnixFs, UnixFsType, DAG_PB}; use alloc::borrow::Cow; use core::fmt; use libipld::multihash::{Code, MultihashDigest}; use libipld::Cid; use quick_protobuf::{MessageWrite, Writer}; /// File tree builder. Implements [`core::default::Default`] which tracks the recent defaults. /// /// Custom file tree builder can be created with [`FileAdder::builder()`] and configuring the /// chunker and collector. /// /// Current implementation maintains an internal buffer for the block creation and uses a /// non-customizable hash function to produce Cid version 0 links. Currently does not support /// inline links. #[derive(Default)] pub struct FileAdder { chunker: Chunker, collector: Collector, block_buffer: Vec<u8>, // all unflushed links as a flat vec; this is compacted as we grow and need to create a link // block for the last N blocks, as decided by the collector. // FIXME: this is a cause of likely "accidentally quadratic" behavior visible when adding a // large file and using a minimal chunk size. Could be that this must be moved to Collector to // help collector (or layout) to decide how this should be persisted. unflushed_links: Vec<Link>, } impl fmt::Debug for FileAdder { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { write!( fmt, "FileAdder {{ chunker: {:?}, block_buffer: {}/{}, unflushed_links: {} }}", self.chunker, self.block_buffer.len(), self.block_buffer.capacity(), LinkFormatter(&self.unflushed_links), ) } } struct LinkFormatter<'a>(&'a [Link]); impl fmt::Display for LinkFormatter<'_> { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { let mut iter = self.0.iter().peekable(); write!(fmt, "[")?; let mut current = match iter.peek() { Some(Link { depth, .. }) => depth, None => return write!(fmt, "]"), }; let mut count = 0; for Link { depth: next_depth, .. } in iter { if current == next_depth { count += 1; } else { write!(fmt, "{}: {}/", current, count)?; let steps_between = if current > next_depth { current - next_depth } else { next_depth - current }; for _ in 0..steps_between - 1 { write!(fmt, "0/")?; } count = 1; current = next_depth; } } write!(fmt, "{}: {}]", current, count) } } /// Represents an intermediate structure which will be serialized into link blocks as both PBLink /// and UnixFs::blocksize. Also holds `depth`, which helps with compaction of the link blocks. struct Link { /// Depth of this link. Zero is leaf, and anything above it is, at least for /// [`BalancedCollector`], the compacted link blocks. depth: usize, /// The link target target: Cid, /// Total size is dag-pb specific part of the link: aggregated size of the linked subtree. total_size: u64, /// File size is the unixfs specific blocksize for this link. In UnixFs link blocks, there is a /// UnixFs::blocksizes item for each link. file_size: u64, } impl fmt::Debug for Link { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct("Link") .field("depth", &self.depth) .field("target", &format_args!("{}", self.target)) .field("total_size", &self.total_size) .field("file_size", &self.file_size) .finish() } } /// Convenience type to facilitate configuring [`FileAdder`]s. #[derive(Default)] pub struct FileAdderBuilder { chunker: Chunker, collector: Collector, } impl FileAdderBuilder { /// Configures the builder to use the given chunker. pub fn with_chunker(self, chunker: Chunker) -> Self { FileAdderBuilder { chunker, ..self } } /// Configures the builder to use the given collector or layout. 
pub fn with_collector(self, collector: impl Into<Collector>) -> Self { FileAdderBuilder { collector: collector.into(), ..self } } /// Returns a new FileAdder pub fn build(self) -> FileAdder { let FileAdderBuilder { chunker, collector } = self; FileAdder { chunker, collector, ..Default::default() } } } impl FileAdder { /// Returns a [`FileAdderBuilder`] for creating a non-default FileAdder. pub fn builder() -> FileAdderBuilder { FileAdderBuilder::default() } /// Returns the likely amount of buffering the file adding will work with best. /// /// When using the size based chunker and input larger than or equal to the hint is `push()`'ed /// to the chunker, the internal buffer will not be used. pub fn size_hint(&self) -> usize { self.chunker.size_hint() } /// Called to push new file bytes into the tree builder. /// /// Returns the newly created blocks (at most 2) and their respective Cids, and the amount of /// `input` consumed. pub fn push(&mut self, input: &[u8]) -> (impl Iterator<Item = (Cid, Vec<u8>)>, usize) { let (accepted, ready) = self.chunker.accept(input, &self.block_buffer); if self.block_buffer.is_empty() && ready { // save single copy as the caller is giving us whole chunks. // // TODO: though, this path does make one question if there is any point in keeping // block_buffer and chunker here; perhaps FileAdder should only handle pre-chunked // blocks and user takes care of chunking (and buffering)? // // cat file | my_awesome_chunker | my_brilliant_collector let leaf = Self::flush_buffered_leaf(accepted, &mut self.unflushed_links, false); assert!(leaf.is_some(), "chunk completed, must produce a new block"); self.block_buffer.clear(); let links = self.flush_buffered_links(false); (leaf.into_iter().chain(links.into_iter()), accepted.len()) } else { // slower path as we manage the buffer. if self.block_buffer.capacity() == 0 { // delay the internal buffer creation until this point, as the caller clearly wants // to use it. self.block_buffer.reserve(self.size_hint()); } self.block_buffer.extend_from_slice(accepted); let written = accepted.len(); let (leaf, links) = if !ready { // a new block did not become ready, which means we couldn't have gotten a new cid. (None, Vec::new()) } else { // a new leaf must be output, as well as possibly a new link block let leaf = Self::flush_buffered_leaf( self.block_buffer.as_slice(), &mut self.unflushed_links, false, ); assert!(leaf.is_some(), "chunk completed, must produce a new block"); self.block_buffer.clear(); let links = self.flush_buffered_links(false); (leaf, links) }; (leaf.into_iter().chain(links.into_iter()), written) } } /// Called after the last [`FileAdder::push`] to finish the tree construction. /// /// Returns a list of Cids and their respective blocks. /// /// Note: the API will hopefully evolve in a direction which will not allocate a new Vec for /// every block in the near-ish future. pub fn finish(mut self) -> impl Iterator<Item = (Cid, Vec<u8>)> { let last_leaf = Self::flush_buffered_leaf( &self.block_buffer.as_slice(), &mut self.unflushed_links, true, ); let root_links = self.flush_buffered_links(true); // should probably error if there is neither? last_leaf.into_iter().chain(root_links.into_iter()) } /// Returns `None` when the input is empty but there are links, otherwise a new Cid and a /// block. 
fn flush_buffered_leaf( input: &[u8], unflushed_links: &mut Vec<Link>, finishing: bool, ) -> Option<(Cid, Vec<u8>)> { if input.is_empty() && (!finishing || !unflushed_links.is_empty()) { return None; } // for empty unixfs file the bytes is missing but filesize is present. let data = if !input.is_empty() { Some(Cow::Borrowed(input)) } else { None }; let filesize = Some(input.len() as u64); let inner = FlatUnixFs { links: Vec::new(), data: UnixFs { Type: UnixFsType::File, Data: data, filesize, // no blocksizes as there are no links ..Default::default() }, }; let (cid, vec) = render_and_hash(&inner); let total_size = vec.len(); let link = Link { depth: 0, target: cid.clone(), total_size: total_size as u64, file_size: input.len() as u64, }; unflushed_links.push(link); Some((cid, vec)) } fn flush_buffered_links(&mut self, finishing: bool) -> Vec<(Cid, Vec<u8>)> { self.collector .flush_links(&mut self.unflushed_links, finishing) } /// Test helper for collecting all of the produced blocks; probably not a good idea outside /// smaller test cases. When `amt` is zero, the whole content is processed at the speed of /// chunker, otherwise `all_content` is pushed at `amt` sized slices with the idea of catching /// bugs in chunkers. #[cfg(test)] fn collect_blocks(mut self, all_content: &[u8], mut amt: usize) -> Vec<(Cid, Vec<u8>)> { let mut written = 0; let mut blocks_received = Vec::new(); if amt == 0 { amt = all_content.len(); } while written < all_content.len() { let end = written + (all_content.len() - written).min(amt); let slice = &all_content[written..end]; let (blocks, pushed) = self.push(slice); blocks_received.extend(blocks); written += pushed; } let last_blocks = self.finish(); blocks_received.extend(last_blocks); blocks_received } } fn render_and_hash(flat: &FlatUnixFs<'_>) -> (Cid, Vec<u8>) { // TODO: as shown in later dagger we don't really need to render the FlatUnixFs fully; we could // either just render a fixed header and continue with the body OR links, though the links are // a bit more complicated. let mut out = Vec::with_capacity(flat.get_size()); let mut writer = Writer::new(&mut out); flat.write_message(&mut writer) .expect("unsure how this could fail"); let mh = Code::Sha2_256.digest(&out); let cid = Cid::new_v1(DAG_PB, mh); (cid, out) } /// Chunker strategy #[derive(Debug, Clone)] pub enum Chunker { /// Size based chunking Size(usize), } impl Default for Chunker { /// Returns a default chunker which matches go-ipfs 0.6 fn default() -> Self { Chunker::Size(256 * 1024) } } impl Chunker { fn accept<'a>(&mut self, input: &'a [u8], buffered: &[u8]) -> (&'a [u8], bool) { use Chunker::*; match self { Size(max) => { let l = input.len().min(*max - buffered.len()); let accepted = &input[..l]; let ready = buffered.len() + l >= *max; (accepted, ready) } } } fn size_hint(&self) -> usize { use Chunker::*; match self { Size(max) => *max, } } } /// Collector or layout strategy. For more information, see the [Layout section of the spec]. /// Currently only the default balanced collector/layout has been implemented. /// /// [Layout section of the spec]: https://github.com/ipfs/specs/blob/master/UNIXFS.md#layout #[derive(Debug, Clone)] pub enum Collector { /// Balanced trees. 
Balanced(BalancedCollector), } impl Default for Collector { fn default() -> Self { Collector::Balanced(Default::default()) } } impl Collector { fn flush_links(&mut self, pending: &mut Vec<Link>, finishing: bool) -> Vec<(Cid, Vec<u8>)> { use Collector::*; match self { Balanced(bc) => bc.flush_links(pending, finishing), } } } /// BalancedCollector creates balanced UnixFs trees, most optimized for random access to different /// parts of the file. Currently supports only a link count threshold, i.e. the branching factor. #[derive(Clone)] pub struct BalancedCollector { branching_factor: usize, // reused between link block generation reused_links: Vec<PBLink<'static>>, // reused between link block generation reused_blocksizes: Vec<u64>, } impl fmt::Debug for BalancedCollector { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { write!( fmt, "BalancedCollector {{ branching_factor: {} }}", self.branching_factor ) } } impl Default for BalancedCollector { /// Returns a default collector which matches go-ipfs 0.6 /// /// The origin for 174 is not described in the [specs], but it likely has something to do /// with being "good enough" regarding prefetching when reading and allows reusing some of the /// link blocks if parts of a longer file change. /// /// [specs]: https://github.com/ipfs/specs/blob/master/UNIXFS.md fn default() -> Self { Self::with_branching_factor(174) } } impl From<BalancedCollector> for Collector { fn from(b: BalancedCollector) -> Self { Collector::Balanced(b) } } impl BalancedCollector { /// Configure Balanced collector with the given branching factor. pub fn with_branching_factor(branching_factor: usize) -> Self { assert!(branching_factor > 0); Self { branching_factor, reused_links: Vec::new(), reused_blocksizes: Vec::new(), } } /// In-place compression of the `pending` links to a balanced hierarchy. When `finishing`, the /// links will be compressed iteratively from the lowest level to produce a single root link /// block. fn flush_links(&mut self, pending: &mut Vec<Link>, finishing: bool) -> Vec<(Cid, Vec<u8>)> { /* file |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -| links-0 |-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|E|F|G| links-1 |-------|-------|-------|-------|-B-----|-C-----|-D-----|\ / links-2 |-A-----------------------------| ^^^ ^ one short \--- link.depth pending [A, B, C, D, E, F, G] #flush_buffered_links(...) first iteration: file |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -| links-0 |-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|E|F|G| links-1 |-------|-------|-------|-------|-B-----|-C-----|-D-----|=#1==| links-2 |-A-----------------------------| pending [A, B, C, D, E, F, G] => [A, B, C, D, 1] new link block #1 is created for E, F, and G. #flush_buffered_links(...) second iteration: file |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -| links-0 |-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-| links-1 |-------|-------|-------|-------|-B-----|-C-----|-D-----|-#1--| links-2 |-A-----------------------------|=========================#2==| pending [A, B, C, D, 1] => [A, 2] new link block #2 is created for B, C, D, and #1. #flush_buffered_links(...)
last iteration: file |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -| links-0 |-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-| links-1 |-------|-------|-------|-------|-------|-------|-------|-#1--| links-2 |-A-----------------------------|-------------------------#2--| links-3 |=========================================================#3==| pending [A, 2] => [3] new link block #3 is created for A, and #2. (the root block) */ let mut ret = Vec::new(); let mut reused_links = core::mem::take(&mut self.reused_links); let mut reused_blocksizes = core::mem::take(&mut self.reused_blocksizes); if let Some(need) = self.branching_factor.checked_sub(reused_links.capacity()) { reused_links.reserve(need); } if let Some(need) = self .branching_factor .checked_sub(reused_blocksizes.capacity()) { reused_blocksizes.reserve(need); } 'outer: for level in 0.. { if pending.len() == 1 && finishing || pending.len() <= self.branching_factor && !finishing { // when there is just a single linking block left and we are finishing, we are // done. It might not be part of the `ret` as will be the case with single chunk // files for example. // // normally when not finishing we do nothing if we don't have enough links. break; } // when finishing, we iterate the level to completion in blocks of // self.branching_factor and *insert* values at the offset of the first compressed // link. on following iterations this will be the index after the higher level index. let mut starting_point = 0; // when creating the link blocks, start overwriting the pending links at the first // found link for this depth. this index will be incremented for successive link // blocks. let mut last_overwrite = None; while let Some(mut first_at) = &pending[starting_point..] .iter() .position(|Link { depth, .. }| depth == &level) { // fix first_at as absolute index from being possible relative to the // starting_point first_at += starting_point; if !finishing && pending[first_at..].len() <= self.branching_factor { if let Some(last_overwrite) = last_overwrite { // drain any processed pending.drain((last_overwrite + 1)..first_at); } break 'outer; } reused_links.clear(); reused_blocksizes.clear(); let mut nested_size = 0; let mut nested_total_size = 0; let last = (first_at + self.branching_factor).min(pending.len()); for (index, link) in pending[first_at..last].iter().enumerate() { assert_eq!( link.depth, level, "unexpected link depth {} when searching at level {} index {}", link.depth, level, index + first_at ); Self::partition_link( link, &mut reused_links, &mut reused_blocksizes, &mut nested_size, &mut nested_total_size, ); } debug_assert_eq!(reused_links.len(), reused_blocksizes.len()); let inner = FlatUnixFs { links: reused_links, data: UnixFs { Type: UnixFsType::File, filesize: Some(nested_size), blocksizes: reused_blocksizes, ..Default::default() }, }; let (cid, vec) = render_and_hash(&inner); // start overwriting at the first index of this level, then continue forward on // next iterations. 
let index = last_overwrite.map(|i| i + 1).unwrap_or(first_at); pending[index] = Link { depth: level + 1, target: cid.clone(), total_size: nested_total_size + vec.len() as u64, file_size: nested_size, }; ret.push((cid, vec)); reused_links = inner.links; reused_blocksizes = inner.data.blocksizes; starting_point = last; last_overwrite = Some(index); } if let Some(last_overwrite) = last_overwrite { pending.truncate(last_overwrite + 1); } // this holds regardless of finishing; we would had broken 'outer had there been less // than full blocks left. debug_assert_eq!( pending.iter().position(|l| l.depth == level), None, "should have no more of depth {}: {}", level, LinkFormatter(pending.as_slice()) ); } self.reused_links = reused_links; self.reused_blocksizes = reused_blocksizes; ret } /// Each link needs to be partitioned into the four mut arguments received by this function in /// order to produce the expected UnixFs output. fn partition_link( link: &Link, links: &mut Vec<PBLink<'static>>, blocksizes: &mut Vec<u64>, nested_size: &mut u64, nested_total_size: &mut u64, ) { links.push(PBLink { Hash: Some(link.target.to_bytes().into()), Name: Some("".into()), Tsize: Some(link.total_size), }); blocksizes.push(link.file_size); *nested_size += link.file_size; *nested_total_size += link.total_size; } } #[cfg(test)] mod tests { use super::{BalancedCollector, Chunker, FileAdder}; use crate::test_support::FakeBlockstore; use core::convert::TryFrom; use hex_literal::hex; use libipld::Cid; #[test] fn test_size_chunker() { assert_eq!(size_chunker_scenario(1, 4, 0), (1, true)); assert_eq!(size_chunker_scenario(2, 4, 0), (2, true)); assert_eq!(size_chunker_scenario(2, 1, 0), (1, false)); assert_eq!(size_chunker_scenario(2, 1, 1), (1, true)); assert_eq!(size_chunker_scenario(32, 3, 29), (3, true)); // this took some debugging time: assert_eq!(size_chunker_scenario(32, 4, 29), (3, true)); } fn size_chunker_scenario(max: usize, input_len: usize, existing_len: usize) -> (usize, bool) { let input = vec![0; input_len]; let existing = vec![0; existing_len]; let (accepted, ready) = Chunker::Size(max).accept(&input, &existing); (accepted.len(), ready) } #[test] fn favourite_single_block_file() { let blocks = FakeBlockstore::with_fixtures(); // everyones favourite content let content = b"foobar\n"; let mut adder = FileAdder::default(); { let (mut ready_blocks, bytes) = adder.push(content); assert!(ready_blocks.next().is_none()); assert_eq!(bytes, content.len()); } // real impl would probably hash this ... except maybe hashing is faster when done inline? // or maybe not let (_, file_block) = adder .finish() .next() .expect("there must have been the root block"); assert_eq!( blocks.get_by_str("QmRgutAxd8t7oGkSm4wmeuByG6M51wcTso6cubDdQtuEfL"), file_block.as_slice() ); } #[test] fn favourite_multi_block_file() { // root should be QmRJHYTNvC3hmd9gJQARxLR1QMEincccBV53bBw524yyq6 let blocks = FakeBlockstore::with_fixtures(); let content = b"foobar\n"; let adder = FileAdder::builder().with_chunker(Chunker::Size(2)).build(); let blocks_received = adder.collect_blocks(content, 0); // the order here is "fo", "ob", "ar", "\n", root block // while verifying the root Cid would be *enough* this is easier to eyeball, ... not really // that much but ... 
let expected = [ "bafybeih67h7bqbeufm26dhqulib7tsovzkojs7o2bkkbn47vcwss6gz44e", "bafybeig7xffxllfsbd6uq46yjbzk6wf5mxdtc5ykpvga33vubchiooil7y", "bafybeiafisl24tujqewigj3kjdr6m6ibhj4iw7aowatrfxyvbfoafvwnfq", "bafybeigmgmwown66u7j5pqancojrc5ry2pwzmnlvqnwg2rfcjfi6irgplu", "bafybeicbvfcf3ys43v7u4obvdypbdzdsddiqvagpwldjvlj7kyrkwkpm3u", ] .iter() .map(|key| { let cid = Cid::try_from(*key).unwrap(); let block = blocks.get_by_str(key).to_vec(); (cid, block) }) .collect::<Vec<_>>(); if blocks_received != expected { for ((actual_cid, actual_block), (expected_cid, expected_block)) in blocks_received.into_iter().zip(expected.into_iter()) { assert_eq!( actual_cid, expected_cid, "Expected\n\t{}\n\t{:02x?}\nActual\n\t{}\n\t{:02x?}", expected_cid, expected_block, actual_cid, actual_block ); } } } #[test] fn three_layers() { let content = b"Lorem ipsum dolor sit amet, sit enim montes aliquam. Cras non lorem, \ rhoncus condimentum, irure et ante. Pulvinar suscipit odio ante, et tellus a enim, \ wisi ipsum, vel rhoncus eget faucibus varius, luctus turpis nibh vel odio nulla pede."; assert!(content.len() > 174 && content.len() < 2 * 174); // go-ipfs 0.5 result: QmRQ6NZNUs4JrCT2y7tmCC1wUhjqYuTssB8VXbbN3rMffg, 239 blocks and root // root has two links: // - QmXUcuLGKc8SCMEqG4wgct6NKsSRZQfvB2FCfjDow1PfpB (174 links) // - QmeEn8dxWTzGAFKvyXoLj4oWbh9putL4vSw4uhLXJrSZhs (63 links) // // in future, if we ever add inline Cid generation this test would need to be changed not // to use those inline cids or raw leaves let adder = FileAdder::builder().with_chunker(Chunker::Size(1)).build(); let blocks_received = adder.collect_blocks(content, 0); assert_eq!(blocks_received.len(), 240); assert_eq!( blocks_received.last().unwrap().0.to_string(), "bafybeiedgegddvmfxlnikgwgxet6am5xkrhforeqb6wxyga3p7jmmge27u" ); } #[test] fn three_layers_all_subchunks() { let content = b"Lorem ipsum dolor sit amet, sit enim montes aliquam. Cras non lorem, \ rhoncus condimentum, irure et ante. 
Pulvinar suscipit odio ante, et tellus a enim, \ wisi ipsum, vel rhoncus eget faucibus varius, luctus turpis nibh vel odio nulla pede."; for amt in 1..32 { let adder = FileAdder::builder().with_chunker(Chunker::Size(32)).build(); let blocks_received = adder.collect_blocks(content, amt); assert_eq!( blocks_received.last().unwrap().0.to_string(), "bafybeiedxvsh3dwkublmenfbir3li4jf52opwcf5dgpi62h6yv4gd7klku", "amt: {}", amt ); } } #[test] fn empty_file() { let blocks = FileAdder::default().collect_blocks(b"", 0); assert_eq!(blocks.len(), 1); // 0a == field dag-pb body (unixfs) // 04 == dag-pb body len, varint, 4 bytes // 08 == field type tag, varint, 1 byte // 02 == field type (File) // 18 == field filesize tag, varint // 00 == filesize, varint, 1 byte assert_eq!(blocks[0].1.as_slice(), &hex!("0a 04 08 02 18 00")); assert_eq!( blocks[0].0.to_string(), "bafybeif7ztnhq65lumvvtr4ekcwd2ifwgm3awq4zfr3srh462rwyinlb4y" ); } #[test] fn full_link_block_and_a_byte() { let buf = vec![0u8; 2]; // this should produce a root with two links // +----------^---+ // | | // |----------------------| |-| <-- link blocks // ^^^^^^^^^^^^^^^^^^^^^^ ^ // 174 blocks \--- 1 block let branching_factor = 174; let mut adder = FileAdder::builder() .with_chunker(Chunker::Size(2)) .with_collector(BalancedCollector::with_branching_factor(branching_factor)) .build(); let mut blocks_count = 0; for _ in 0..branching_factor { let (blocks, written) = adder.push(buf.as_slice()); assert_eq!(written, buf.len()); blocks_count += blocks.count(); } let (blocks, written) = adder.push(&buf[0..1]); assert_eq!(written, 1); blocks_count += blocks.count(); let last_blocks = adder.finish().collect::<Vec<_>>(); blocks_count += last_blocks.len(); // chunks == 174 // one link block for 174 // one is for the single byte block // one is a link block for the singular single byte block // other is for the root block assert_eq!(blocks_count, branching_factor + 1 + 1 + 1 + 1); assert_eq!( last_blocks.last().unwrap().0.to_string(), "bafybeiai6dvlhomgaargeb677jd7bugp7pscqeakbvtapdlpvsfuedojqa" ); } #[test] fn full_link_block() { let buf = vec![0u8; 1]; let branching_factor = 174; let mut adder = FileAdder::builder() .with_chunker(Chunker::Size(1)) .with_collector(BalancedCollector::with_branching_factor(branching_factor)) .build(); let mut blocks_count = 0; for _ in 0..branching_factor { let (blocks, written) = adder.push(buf.as_slice()); assert_eq!(written, buf.len()); blocks_count += blocks.count(); } let mut last_blocks = adder.finish(); // go-ipfs waits until finish to get a single link block, no additional root block let last_block = last_blocks.next().expect("must not have flushed yet"); blocks_count += 1; assert_eq!(last_blocks.next(), None); assert_eq!( last_block.0.to_string(), "bafybeihkyby6yehjk25b3havpq4jxsfqxf54jizpjqodovxiuwav62uzpy" ); assert_eq!(blocks_count, 175); } }
35.316686
99
0.543967
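Typical use of the `FileAdder` above is a push loop followed by `finish()`, mirroring the `collect_blocks` test helper. A minimal sketch, assuming it lives alongside the module so `FileAdder` and the crate's `Cid` import are in scope; persisting the returned blocks is left to the caller:

// Feed a whole buffer through the adder and collect every produced block.
fn add_file(content: &[u8]) -> Vec<(Cid, Vec<u8>)> {
    let mut adder = FileAdder::default();
    let mut blocks = Vec::new();
    let mut written = 0;
    while written < content.len() {
        // push() may consume less than it is offered; loop until done.
        let (ready, consumed) = adder.push(&content[written..]);
        blocks.extend(ready);
        written += consumed;
    }
    // finish() flushes the final leaf plus the link blocks up to the root.
    blocks.extend(adder.finish());
    blocks
}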
29af5ca840a79a6aff1c464a54dc68bc67110e5e
11,242
use std::{collections::HashSet, time::Duration}; use cosmos_gravity::{ query::get_pending_send_to_eth, send::{cancel_send_to_eth, send_request_batch, send_to_eth}, }; use ethereum_gravity::{send_to_cosmos::send_to_cosmos, utils::get_tx_batch_nonce}; use futures::future::join_all; use gravity_proto::gravity::query_client::QueryClient as GravityQueryClient; use gravity_utils::{ clarity::Address as EthAddress, deep_space::{coin::Coin, Contact}, web30::{client::Web3, types::SendTxOption}, }; use rand::seq::SliceRandom; use tokio::time::sleep; use tonic::transport::Channel; use crate::{get_fee, one_eth, one_hundred_eth, utils::*, TOTAL_TIMEOUT}; const TIMEOUT: Duration = Duration::from_secs(120); /// The number of users we will be simulating for this test, each user /// will get one token from each token type in erc20_addresses and send it /// across the bridge to Cosmos as a deposit and then send it back to a different /// Ethereum address in a transaction batch /// So the total number of /// Ethereum sends = (2 * NUM_USERS) /// ERC20 sends = (erc20_addresses.len() * NUM_USERS) /// Gravity Deposits = (erc20_addresses.len() * NUM_USERS) /// Batches executed = erc20_addresses.len() * (NUM_USERS / 100) const NUM_USERS: usize = 100; /// Perform a stress test by sending thousands of /// transactions and producing large batches #[allow(clippy::too_many_arguments)] pub async fn transaction_stress_test( web30: &Web3, contact: &Contact, grpc_client: GravityQueryClient<Channel>, keys: Vec<ValidatorKeys>, gravity_address: EthAddress, erc20_addresses: Vec<EthAddress>, ) { let mut grpc_client = grpc_client; let no_relay_market_config = create_no_batch_requests_config(); start_orchestrators(keys.clone(), gravity_address, false, no_relay_market_config).await; // Generate 100 user keys to send ETH and multiple types of tokens let mut user_keys = Vec::new(); for _ in 0..NUM_USERS { user_keys.push(get_user_key()); } // the sending eth addresses need Ethereum to send ERC20 tokens to the bridge let sending_eth_addresses: Vec<EthAddress> = user_keys.iter().map(|i| i.eth_address).collect(); // the destination eth addresses need Ethereum to perform a contract call and get their erc20 balances let dest_eth_addresses: Vec<EthAddress> = user_keys.iter().map(|i| i.eth_dest_address).collect(); let mut eth_destinations = Vec::new(); eth_destinations.extend(sending_eth_addresses.clone()); eth_destinations.extend(dest_eth_addresses); send_eth_bulk(one_eth(), &eth_destinations, web30).await; info!("Sent {} addresses 1 ETH", NUM_USERS); // now we need to send all the sending eth addresses erc20's to send for token in erc20_addresses.iter() { send_erc20_bulk(one_hundred_eth(), *token, &sending_eth_addresses, web30).await; info!("Sent {} addresses 100 {}", NUM_USERS, token); } web30.wait_for_next_block(TOTAL_TIMEOUT).await.unwrap(); for token in erc20_addresses.iter() { let mut sends = Vec::new(); for keys in user_keys.iter() { let fut = send_to_cosmos( *token, gravity_address, one_hundred_eth(), keys.cosmos_address, keys.eth_key, TIMEOUT, web30, vec![SendTxOption::GasPriceMultiplier(5.0)], ); sends.push(fut); } let txids = join_all(sends).await; let mut wait_for_txid = Vec::new(); for txid in txids { let wait = web30.wait_for_transaction(txid.unwrap(), TIMEOUT, None); wait_for_txid.push(wait); } let results = join_all(wait_for_txid).await; for result in results { let result = result.unwrap(); result.block_number.unwrap(); } info!( "Locked 100 {} from {} into the Gravity Ethereum Contract", token, NUM_USERS ); 
web30.wait_for_next_block(TOTAL_TIMEOUT).await.unwrap(); } let check_all_deposits_bridged_to_cosmos = async { loop { let mut good = true; for keys in user_keys.iter() { let c_addr = keys.cosmos_address; let balances = contact.get_balances(c_addr).await.unwrap(); for token in erc20_addresses.iter() { let mut found = false; for balance in balances.iter() { if balance.denom.contains(&token.to_string()) && balance.amount == one_hundred_eth() { found = true; } } if !found { good = false; } } } if good { break; } sleep(Duration::from_secs(5)).await; } }; if tokio::time::timeout(TOTAL_TIMEOUT, check_all_deposits_bridged_to_cosmos) .await .is_err() { panic!( "Failed to perform all {} deposits to Cosmos!", user_keys.len() * erc20_addresses.len() ); } else { info!( "All {} deposits bridged to Cosmos successfully!", user_keys.len() * erc20_addresses.len() ); } let send_amount = one_hundred_eth() - 500u16.into(); let mut denoms = HashSet::new(); for token in erc20_addresses.iter() { let mut futs = Vec::new(); for keys in user_keys.iter() { let c_addr = keys.cosmos_address; let c_key = keys.cosmos_key; let e_dest_addr = keys.eth_dest_address; let balances = contact.get_balances(c_addr).await.unwrap(); // this way I don't have to hardcode a denom and we can change the way denoms are formed // without changing this test. let mut send_coin = None; for balance in balances { if balance.denom.contains(&token.to_string()) { send_coin = Some(balance.clone()); denoms.insert(balance.denom); } } let mut send_coin = send_coin.unwrap(); send_coin.amount = send_amount.clone(); let send_fee = Coin { denom: send_coin.denom.clone(), amount: 1u8.into(), }; let res = send_to_eth( c_key, e_dest_addr, send_coin, send_fee.clone(), send_fee, contact, ); futs.push(res); } let results = join_all(futs).await; for result in results { let result = result.unwrap(); trace!("SendToEth result {:?}", result); } info!( "Successfully placed {} {} into the tx pool", NUM_USERS, token ); } // randomly select a user to cancel their transaction, as part of this test // we make sure that this user withdraws absolutely zero tokens let mut rng = rand::thread_rng(); let user_who_cancels = user_keys.choose(&mut rng).unwrap(); let pending = get_pending_send_to_eth(&mut grpc_client, user_who_cancels.cosmos_address) .await .unwrap(); // if batch creation is made automatic this becomes a race condition we'll have to consider assert!(pending.transfers_in_batches.is_empty()); assert!(!pending.unbatched_transfers.is_empty()); let denom = denoms.iter().next().unwrap().clone(); let bridge_fee = Coin { denom, amount: 1u8.into(), }; // cancel all outgoing transactions for this user for tx in pending.unbatched_transfers { let res = cancel_send_to_eth( user_who_cancels.cosmos_key, bridge_fee.clone(), contact, tx.id, ) .await .unwrap(); info!("{:?}", res); } contact.wait_for_next_block(TIMEOUT).await.unwrap(); // check that the cancellation worked let pending = get_pending_send_to_eth(&mut grpc_client, user_who_cancels.cosmos_address) .await .unwrap(); info!("{:?}", pending); assert!(pending.transfers_in_batches.is_empty()); assert!(pending.unbatched_transfers.is_empty()); // this user will have someone else attempt to cancel their transaction let mut victim = None; for key in user_keys.iter() { if key != user_who_cancels { victim = Some(key); break; } } let pending = get_pending_send_to_eth(&mut grpc_client, victim.unwrap().cosmos_address) .await .unwrap(); // try to cancel the victim's transactions and ensure failure for tx in pending.unbatched_transfers {
let res = cancel_send_to_eth( user_who_cancels.cosmos_key, bridge_fee.clone(), contact, tx.id, ) .await; info!("{:?}", res); } for denom in denoms { info!("Requesting batch for {}", denom); let res = send_request_batch(keys[0].validator_key, denom, Some(get_fee()), contact) .await .unwrap(); info!("batch request response is {:?}", res); } let check_withdraws_from_ethereum = async { loop { let mut good = true; let mut found_canceled = false; for keys in user_keys.iter() { let e_dest_addr = keys.eth_dest_address; for token in erc20_addresses.iter() { let bal = get_erc20_balance_safe(*token, web30, e_dest_addr) .await .unwrap(); if bal != send_amount.clone() { if e_dest_addr == user_who_cancels.eth_address && bal == 0u8.into() { info!("We successfully found the user who canceled their sends!"); found_canceled = true; } else { good = false; } } } } if good && found_canceled { info!( "All {} withdraws to Ethereum bridged successfully!", NUM_USERS * erc20_addresses.len() ); break; } sleep(Duration::from_secs(5)).await; } }; if tokio::time::timeout(TOTAL_TIMEOUT, check_withdraws_from_ethereum) .await .is_err() { panic!( "Failed to perform all {} withdraws to Ethereum!", NUM_USERS * erc20_addresses.len() ); } // we should find a batch nonce greater than zero since all the batches // executed for token in erc20_addresses { assert!( get_tx_batch_nonce(gravity_address, token, keys[0].eth_key.to_address(), web30) .await .unwrap() > 0 ) } }
34.913043
106
0.569561
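The stress test above leans on one pattern repeatedly: poll a condition in a loop with a short sleep, and wrap the whole loop in `tokio::time::timeout` so a stuck bridge fails the test instead of hanging it forever. The pattern distilled into a self-contained sketch (the condition itself is a placeholder for the real balance and nonce queries):

use std::time::Duration;
use tokio::time::{sleep, timeout};

// Placeholder; the real test queries balances or batch nonces here.
async fn condition_met() -> bool {
    true
}

#[tokio::main]
async fn main() {
    let wait = async {
        loop {
            if condition_met().await {
                break;
            }
            sleep(Duration::from_secs(5)).await;
        }
    };
    // Bound the polling so a condition that never holds fails loudly.
    if timeout(Duration::from_secs(300), wait).await.is_err() {
        panic!("condition was never met within the timeout");
    }
}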
214473b6088818329cbb1479da1074c42eb4caad
113
use span::Span; #[derive(Debug, Clone)] pub struct IdentifierName { pub span: Span, pub name: String, }
14.125
27
0.654867
f47a4e86680a241f074188ae1d0199294739b852
15,146
use std::collections::{BTreeSet, HashMap}; use std::env; use std::ffi::{OsStr, OsString}; use std::path::PathBuf; use cargo_platform::CfgExpr; use cargo_util::{paths, ProcessBuilder}; use super::BuildContext; use crate::core::compiler::{CompileKind, Metadata, Unit}; use crate::core::Package; use crate::util::{config, CargoResult, Config}; /// Structure with enough information to run `rustdoc --test`. pub struct Doctest { /// What's being doctested pub unit: Unit, /// Arguments needed to pass to rustdoc to run this test. pub args: Vec<OsString>, /// Whether or not -Zunstable-options is needed. pub unstable_opts: bool, /// The -Clinker value to use. pub linker: Option<PathBuf>, /// The script metadata, if this unit's package has a build script. /// /// This is used for indexing [`Compilation::extra_env`]. pub script_meta: Option<Metadata>, } /// Information about the output of a unit. #[derive(Ord, PartialOrd, Eq, PartialEq)] pub struct UnitOutput { /// The unit that generated this output. pub unit: Unit, /// Path to the unit's primary output (an executable or cdylib). pub path: PathBuf, /// The script metadata, if this unit's package has a build script. /// /// This is used for indexing [`Compilation::extra_env`]. pub script_meta: Option<Metadata>, } /// A structure returning the result of a compilation. pub struct Compilation<'cfg> { /// An array of all tests created during this compilation. pub tests: Vec<UnitOutput>, /// An array of all binaries created. pub binaries: Vec<UnitOutput>, /// An array of all cdylibs created. pub cdylibs: Vec<UnitOutput>, /// The crate names of the root units specified on the command-line. pub root_crate_names: Vec<String>, /// All directories for the output of native build commands. /// /// This is currently used to drive some entries which are added to the /// LD_LIBRARY_PATH as appropriate. /// /// The order should be deterministic. pub native_dirs: BTreeSet<PathBuf>, /// Root output directory (for the local package's artifacts) pub root_output: HashMap<CompileKind, PathBuf>, /// Output directory for rust dependencies. /// May be for the host or for a specific target. pub deps_output: HashMap<CompileKind, PathBuf>, /// The path to the host libdir for the compiler used sysroot_host_libdir: PathBuf, /// The path to libstd for each target sysroot_target_libdir: HashMap<CompileKind, PathBuf>, /// Extra environment variables that were passed to compilations and should /// be passed to future invocations of programs. /// /// The key is the build script metadata for uniquely identifying the /// `RunCustomBuild` unit that generated these env vars. pub extra_env: HashMap<Metadata, Vec<(String, String)>>, /// Libraries to test with rustdoc. pub to_doc_test: Vec<Doctest>, /// The target host triple. 
pub host: String, config: &'cfg Config, /// Rustc process to be used by default rustc_process: ProcessBuilder, /// Rustc process to be used for workspace crates instead of rustc_process rustc_workspace_wrapper_process: ProcessBuilder, /// Optional rustc process to be used for primary crates instead of either rustc_process or /// rustc_workspace_wrapper_process primary_rustc_process: Option<ProcessBuilder>, target_runners: HashMap<CompileKind, Option<(PathBuf, Vec<String>)>>, } impl<'cfg> Compilation<'cfg> { pub fn new<'a>(bcx: &BuildContext<'a, 'cfg>) -> CargoResult<Compilation<'cfg>> { let mut rustc = bcx.rustc().process(); let mut primary_rustc_process = bcx.build_config.primary_unit_rustc.clone(); let mut rustc_workspace_wrapper_process = bcx.rustc().workspace_process(); if bcx.config.extra_verbose() { rustc.display_env_vars(); rustc_workspace_wrapper_process.display_env_vars(); if let Some(rustc) = primary_rustc_process.as_mut() { rustc.display_env_vars(); } } Ok(Compilation { // TODO: deprecated; remove. native_dirs: BTreeSet::new(), root_output: HashMap::new(), deps_output: HashMap::new(), sysroot_host_libdir: bcx .target_data .info(CompileKind::Host) .sysroot_host_libdir .clone(), sysroot_target_libdir: bcx .all_kinds .iter() .map(|&kind| { ( kind, bcx.target_data.info(kind).sysroot_target_libdir.clone(), ) }) .collect(), tests: Vec::new(), binaries: Vec::new(), cdylibs: Vec::new(), root_crate_names: Vec::new(), extra_env: HashMap::new(), to_doc_test: Vec::new(), config: bcx.config, host: bcx.host_triple().to_string(), rustc_process: rustc, rustc_workspace_wrapper_process, primary_rustc_process, target_runners: bcx .build_config .requested_kinds .iter() .chain(Some(&CompileKind::Host)) .map(|kind| Ok((*kind, target_runner(bcx, *kind)?))) .collect::<CargoResult<HashMap<_, _>>>()?, }) } /// Returns a [`ProcessBuilder`] for running `rustc`. /// /// `is_primary` is true if this is a "primary package", which means it /// was selected by the user on the command-line (such as with a `-p` /// flag), see [`crate::core::compiler::Context::primary_packages`]. /// /// `is_workspace` is true if this is a workspace member. pub fn rustc_process( &self, unit: &Unit, is_primary: bool, is_workspace: bool, ) -> CargoResult<ProcessBuilder> { let rustc = if is_primary && self.primary_rustc_process.is_some() { self.primary_rustc_process.clone().unwrap() } else if is_workspace { self.rustc_workspace_wrapper_process.clone() } else { self.rustc_process.clone() }; let cmd = fill_rustc_tool_env(rustc, unit); self.fill_env(cmd, &unit.pkg, None, unit.kind, true) } /// Returns a [`ProcessBuilder`] for running `rustdoc`. pub fn rustdoc_process( &self, unit: &Unit, script_meta: Option<Metadata>, ) -> CargoResult<ProcessBuilder> { let rustdoc = ProcessBuilder::new(&*self.config.rustdoc()?); let cmd = fill_rustc_tool_env(rustdoc, unit); let mut p = self.fill_env(cmd, &unit.pkg, script_meta, unit.kind, true)?; unit.target.edition().cmd_edition_arg(&mut p); for crate_type in unit.target.rustc_crate_types() { p.arg("--crate-type").arg(crate_type.as_str()); } Ok(p) } /// Returns a [`ProcessBuilder`] appropriate for running a process for the /// host platform. /// /// This is currently only used for running build scripts. If you use this /// for anything else, please be extra careful on how environment /// variables are set! 
pub fn host_process<T: AsRef<OsStr>>( &self, cmd: T, pkg: &Package, ) -> CargoResult<ProcessBuilder> { self.fill_env( ProcessBuilder::new(cmd), pkg, None, CompileKind::Host, false, ) } pub fn target_runner(&self, kind: CompileKind) -> Option<&(PathBuf, Vec<String>)> { self.target_runners.get(&kind).and_then(|x| x.as_ref()) } /// Returns a [`ProcessBuilder`] appropriate for running a process for the /// target platform. This is typically used for `cargo run` and `cargo /// test`. /// /// `script_meta` is the metadata for the `RunCustomBuild` unit that this /// unit used for its build script. Use `None` if the package did not have /// a build script. pub fn target_process<T: AsRef<OsStr>>( &self, cmd: T, kind: CompileKind, pkg: &Package, script_meta: Option<Metadata>, ) -> CargoResult<ProcessBuilder> { let builder = if let Some((runner, args)) = self.target_runner(kind) { let mut builder = ProcessBuilder::new(runner); builder.args(args); builder.arg(cmd); builder } else { ProcessBuilder::new(cmd) }; self.fill_env(builder, pkg, script_meta, kind, false) } /// Prepares a new process with an appropriate environment to run against /// the artifacts produced by the build process. /// /// The package argument is also used to configure environment variables as /// well as the working directory of the child process. fn fill_env( &self, mut cmd: ProcessBuilder, pkg: &Package, script_meta: Option<Metadata>, kind: CompileKind, is_rustc_tool: bool, ) -> CargoResult<ProcessBuilder> { let mut search_path = Vec::new(); if is_rustc_tool { search_path.push(self.deps_output[&CompileKind::Host].clone()); search_path.push(self.sysroot_host_libdir.clone()); } else { search_path.extend(super::filter_dynamic_search_path( self.native_dirs.iter(), &self.root_output[&kind], )); search_path.push(self.deps_output[&kind].clone()); search_path.push(self.root_output[&kind].clone()); // For build-std, we don't want to accidentally pull in any shared // libs from the sysroot that ships with rustc. This may not be // required (at least I cannot craft a situation where it // matters), but is here to be safe. if self.config.cli_unstable().build_std.is_none() { search_path.push(self.sysroot_target_libdir[&kind].clone()); } } let dylib_path = paths::dylib_path(); let dylib_path_is_empty = dylib_path.is_empty(); search_path.extend(dylib_path.into_iter()); if cfg!(target_os = "macos") && dylib_path_is_empty { // These are the defaults when DYLD_FALLBACK_LIBRARY_PATH isn't // set or set to an empty string. Since Cargo is explicitly setting // the value, make sure the defaults still work. 
if let Some(home) = env::var_os("HOME") { search_path.push(PathBuf::from(home).join("lib")); } search_path.push(PathBuf::from("/usr/local/lib")); search_path.push(PathBuf::from("/usr/lib")); } let search_path = paths::join_paths(&search_path, paths::dylib_path_envvar())?; cmd.env(paths::dylib_path_envvar(), &search_path); if let Some(meta) = script_meta { if let Some(env) = self.extra_env.get(&meta) { for (k, v) in env { cmd.env(k, v); } } } let metadata = pkg.manifest().metadata(); let cargo_exe = self.config.cargo_exe()?; cmd.env(crate::CARGO_ENV, cargo_exe); // When adding new environment variables depending on // crate properties which might require rebuild upon change // consider adding the corresponding properties to the hash // in BuildContext::target_metadata() cmd.env("CARGO_MANIFEST_DIR", pkg.root()) .env("CARGO_PKG_VERSION_MAJOR", &pkg.version().major.to_string()) .env("CARGO_PKG_VERSION_MINOR", &pkg.version().minor.to_string()) .env("CARGO_PKG_VERSION_PATCH", &pkg.version().patch.to_string()) .env("CARGO_PKG_VERSION_PRE", pkg.version().pre.as_str()) .env("CARGO_PKG_VERSION", &pkg.version().to_string()) .env("CARGO_PKG_NAME", &*pkg.name()) .env( "CARGO_PKG_DESCRIPTION", metadata.description.as_ref().unwrap_or(&String::new()), ) .env( "CARGO_PKG_HOMEPAGE", metadata.homepage.as_ref().unwrap_or(&String::new()), ) .env( "CARGO_PKG_REPOSITORY", metadata.repository.as_ref().unwrap_or(&String::new()), ) .env( "CARGO_PKG_LICENSE", metadata.license.as_ref().unwrap_or(&String::new()), ) .env( "CARGO_PKG_LICENSE_FILE", metadata.license_file.as_ref().unwrap_or(&String::new()), ) .env("CARGO_PKG_AUTHORS", &pkg.authors().join(":")) .cwd(pkg.root()); // Apply any environment variables from the config for (key, value) in self.config.env_config()?.iter() { // never override a value that has already been set by cargo if cmd.get_envs().contains_key(key) { continue; } if value.is_force() || env::var_os(key).is_none() { cmd.env(key, value.resolve(self.config)); } } Ok(cmd) } } /// Prepares a rustc_tool process with additional environment variables /// that are only relevant in a context that has a unit fn fill_rustc_tool_env(mut cmd: ProcessBuilder, unit: &Unit) -> ProcessBuilder { if unit.target.is_bin() { cmd.env("CARGO_BIN_NAME", unit.target.name()); } cmd.env("CARGO_CRATE_NAME", unit.target.crate_name()); cmd } fn target_runner( bcx: &BuildContext<'_, '_>, kind: CompileKind, ) -> CargoResult<Option<(PathBuf, Vec<String>)>> { let target = bcx.target_data.short_name(&kind); // try target.{}.runner let key = format!("target.{}.runner", target); if let Some(v) = bcx.config.get::<Option<config::PathAndArgs>>(&key)? { let path = v.path.resolve_program(bcx.config); return Ok(Some((path, v.args))); } // try target.'cfg(...)'.runner let target_cfg = bcx.target_data.info(kind).cfg(); let mut cfgs = bcx .config .target_cfgs()? .iter() .filter_map(|(key, cfg)| cfg.runner.as_ref().map(|runner| (key, runner))) .filter(|(key, _runner)| CfgExpr::matches_key(key, target_cfg)); let matching_runner = cfgs.next(); if let Some((key, runner)) = cfgs.next() { anyhow::bail!( "several matching instances of `target.'cfg(..)'.runner` in `.cargo/config`\n\ first match `{}` located in {}\n\ second match `{}` located in {}", matching_runner.unwrap().0, matching_runner.unwrap().1.definition, key, runner.definition ); } Ok(matching_runner.map(|(_k, runner)| { ( runner.val.path.clone().resolve_program(bcx.config), runner.val.args.clone(), ) })) }
36.496386
95
0.5925
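The `fill_env` code above is the machinery behind the familiar `CARGO_PKG_*` variables. A minimal sketch of a crate consuming them: the `env!` form is expanded at compile time by rustc, while the `std::env::var` form relies on the environment that `fill_env` prepares for processes spawned by `cargo run` or `cargo test`.

fn main() {
    // Resolved at compile time; Cargo also sets these when invoking rustc.
    println!("{} v{}", env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION"));

    // Present at run time only when the process is launched through Cargo
    // itself, courtesy of the fill_env logic above.
    match std::env::var("CARGO_MANIFEST_DIR") {
        Ok(dir) => println!("manifest dir: {}", dir),
        Err(_) => println!("not launched through Cargo"),
    }
}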
2369ff78b1ce05eee76299ab1e9383da4b919fbc
1,267
use std::sync::Arc; use crate::clean::cfg::Cfg; use crate::clean::{Crate, Item}; use crate::core::DocContext; use crate::fold::DocFolder; use crate::passes::Pass; crate const PROPAGATE_DOC_CFG: Pass = Pass { name: "propagate-doc-cfg", run: propagate_doc_cfg, description: "propagates `#[doc(cfg(...))]` to child items", }; crate fn propagate_doc_cfg(cr: Crate, _: &mut DocContext<'_>) -> Crate { CfgPropagator { parent_cfg: None }.fold_crate(cr) } struct CfgPropagator { parent_cfg: Option<Arc<Cfg>>, } impl DocFolder for CfgPropagator { fn fold_item(&mut self, mut item: Item) -> Option<Item> { let old_parent_cfg = self.parent_cfg.clone(); let new_cfg = match (self.parent_cfg.take(), item.attrs.cfg.take()) { (None, None) => None, (Some(rc), None) | (None, Some(rc)) => Some(rc), (Some(mut a), Some(b)) => { let b = Arc::try_unwrap(b).unwrap_or_else(|rc| Cfg::clone(&rc)); *Arc::make_mut(&mut a) &= b; Some(a) } }; self.parent_cfg = new_cfg.clone(); item.attrs.cfg = new_cfg; let result = self.fold_item_recur(item); self.parent_cfg = old_parent_cfg; Some(result) } }
28.155556
80
0.586425
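The `(Some(a), Some(b))` arm above is a compact copy-on-write idiom: `Arc::try_unwrap` takes the child cfg by value when no other handle exists, and `Arc::make_mut` clones the parent's inner value only if the `Arc` is shared. A standalone sketch of the same pattern with a toy stand-in type (the real `Cfg` combines the two predicates with `&=`, a logical AND; here we just concatenate):

use std::sync::Arc;

#[derive(Clone, Debug)]
struct Cfg(Vec<String>);

fn merge(parent: Option<Arc<Cfg>>, child: Option<Arc<Cfg>>) -> Option<Arc<Cfg>> {
    match (parent, child) {
        (None, None) => None,
        (Some(c), None) | (None, Some(c)) => Some(c),
        (Some(mut a), Some(b)) => {
            // Take `b` by value when this Arc is the only handle,
            // falling back to a clone otherwise.
            let b = Arc::try_unwrap(b).unwrap_or_else(|rc| Cfg::clone(&rc));
            // Copy-on-write: clones `a`'s inner value only if it is shared.
            Arc::make_mut(&mut a).0.extend(b.0);
            Some(a)
        }
    }
}

fn main() {
    let parent = Some(Arc::new(Cfg(vec!["unix".to_string()])));
    let child = Some(Arc::new(Cfg(vec!["feature = \"x\"".to_string()])));
    println!("{:?}", merge(parent, child));
}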
f94c94012249095d2e7c3b438a30de4a8b44bdf8
8,003
use crate::error::ContractError; use crate::state::{ read_bid, read_bids_by_collateral, read_bids_by_user, read_config, remove_bid, store_bid, Bid, Config, }; use cosmwasm_bignumber::{Decimal256, Uint256}; use cosmwasm_std::{ attr, to_binary, Addr, BankMsg, Coin, CosmosMsg, Deps, DepsMut, Env, MessageInfo, Response, StdResult, WasmMsg, }; use cw20::Cw20ExecuteMsg; use moneymarket::liquidation::{BidResponse, BidsResponse}; use moneymarket::oracle::PriceResponse; use moneymarket::querier::{deduct_tax, query_price, TimeConstraints}; pub fn submit_bid( deps: DepsMut, info: MessageInfo, collateral_token: Addr, premium_rate: Decimal256, ) -> Result<Response, ContractError> { let collateral_token_raw = deps.api.addr_canonicalize(collateral_token.as_str())?; let bidder_raw = deps.api.addr_canonicalize(info.sender.as_str())?; if read_bid(deps.storage, &bidder_raw, &collateral_token_raw).is_ok() { return Err(ContractError::AlreadyBidForCollateral(collateral_token)); } let config: Config = read_config(deps.storage)?; if config.max_premium_rate < premium_rate { return Err(ContractError::PremiumExceedsMaxPremium( config.max_premium_rate.to_string(), )); } let amount: Uint256 = Uint256::from( info.funds .iter() .find(|c| c.denom == config.stable_denom) .map(|c| c.amount) .ok_or(ContractError::AssetNotProvided(config.stable_denom))?, ); store_bid( deps.storage, &bidder_raw, &collateral_token_raw, Bid { amount, premium_rate, }, )?; Ok(Response::new().add_attributes(vec![ attr("action", "submit_bid"), attr("collateral_token", collateral_token), attr("amount", amount), ])) } pub fn retract_bid( deps: DepsMut, info: MessageInfo, collateral_token: Addr, amount: Option<Uint256>, ) -> Result<Response, ContractError> { let config: Config = read_config(deps.storage)?; let collateral_token_raw = deps.api.addr_canonicalize(collateral_token.as_str())?; let bidder_raw = deps.api.addr_canonicalize(info.sender.as_str())?; let bid: Bid = read_bid(deps.storage, &bidder_raw, &collateral_token_raw)?; let amount = amount.unwrap_or(bid.amount); if amount > bid.amount { return Err(ContractError::RetractExceedsBid(bid.amount.into())); } if amount == bid.amount { remove_bid(deps.storage, &bidder_raw, &collateral_token_raw); } else { store_bid( deps.storage, &bidder_raw, &collateral_token_raw, Bid { amount: bid.amount - amount, ..bid }, )?; } Ok(Response::new() .add_message(CosmosMsg::Bank(BankMsg::Send { to_address: info.sender.to_string(), amount: vec![deduct_tax( deps.as_ref(), Coin { denom: config.stable_denom, amount: amount.into(), }, )?], })) .add_attributes(vec![ attr("action", "retract_bid"), attr("collateral_token", collateral_token), attr("bidder", info.sender), attr("amount", amount), ])) } pub fn execute_bid( deps: DepsMut, env: Env, liquidator: Addr, repay_address: Addr, fee_address: Addr, collateral_token: Addr, amount: Uint256, ) -> Result<Response, ContractError> { let config: Config = read_config(deps.storage)?; let collateral_token_raw = deps.api.addr_canonicalize(collateral_token.as_str())?; let bidder_raw = deps.api.addr_canonicalize(liquidator.as_str())?; let bid: Bid = read_bid(deps.storage, &bidder_raw, &collateral_token_raw)?; let oracle_contract = deps.api.addr_humanize(&config.oracle_contract)?; let price: PriceResponse = query_price( deps.as_ref(), oracle_contract, collateral_token.to_string(), config.stable_denom.clone(), Some(TimeConstraints { block_time: env.block.time.seconds(), valid_timeframe: config.price_timeframe, }), )?; let collateral_value = amount * price.rate; let required_stable = 
collateral_value * (Decimal256::one() - std::cmp::min(bid.premium_rate, config.max_premium_rate)); if required_stable > bid.amount { return Err(ContractError::InsufficientBidBalance( required_stable.into(), )); } // Update bid if bid.amount == required_stable { remove_bid(deps.storage, &bidder_raw, &collateral_token_raw); } else { store_bid( deps.storage, &bidder_raw, &collateral_token_raw, Bid { amount: bid.amount - required_stable, ..bid }, )?; } let bid_fee = required_stable * config.bid_fee; let repay_amount = required_stable - bid_fee; let mut messages: Vec<CosmosMsg> = vec![ CosmosMsg::Wasm(WasmMsg::Execute { contract_addr: collateral_token.to_string(), funds: vec![], msg: to_binary(&Cw20ExecuteMsg::Transfer { recipient: liquidator.to_string(), amount: amount.into(), })?, }), CosmosMsg::Bank(BankMsg::Send { to_address: repay_address.to_string(), amount: vec![deduct_tax( deps.as_ref(), Coin { denom: config.stable_denom.clone(), amount: repay_amount.into(), }, )?], }), ]; if !bid_fee.is_zero() { messages.push(CosmosMsg::Bank(BankMsg::Send { to_address: fee_address.to_string(), amount: vec![deduct_tax( deps.as_ref(), Coin { denom: config.stable_denom.clone(), amount: bid_fee.into(), }, )?], })); } Ok(Response::new().add_messages(messages).add_attributes(vec![ attr("action", "execute_bid"), attr("stable_denom", config.stable_denom), attr("repay_amount", repay_amount), attr("bid_fee", bid_fee), attr("collateral_token", collateral_token), attr("collateral_amount", amount), ])) } pub fn query_bid(deps: Deps, collateral_token: Addr, bidder: Addr) -> StdResult<BidResponse> { let bid: Bid = read_bid( deps.storage, &deps.api.addr_canonicalize(bidder.as_str())?, &deps.api.addr_canonicalize(collateral_token.as_str())?, )?; Ok(BidResponse { collateral_token: collateral_token.to_string(), bidder: bidder.to_string(), amount: bid.amount, premium_rate: bid.premium_rate, }) } pub fn query_bids_by_user( deps: Deps, bidder: Addr, start_after: Option<Addr>, limit: Option<u32>, ) -> StdResult<BidsResponse> { let start_after = if let Some(start_after) = start_after { Some(deps.api.addr_canonicalize(start_after.as_str())?) } else { None }; let bids: Vec<BidResponse> = read_bids_by_user( deps, &deps.api.addr_canonicalize(bidder.as_str())?, start_after, limit, )?; Ok(BidsResponse { bids }) } pub fn query_bids_by_collateral( deps: Deps, collateral_token: Addr, start_after: Option<Addr>, limit: Option<u32>, ) -> StdResult<BidsResponse> { let start_after = if let Some(start_after) = start_after { Some(deps.api.addr_canonicalize(start_after.as_str())?) } else { None }; let bids: Vec<BidResponse> = read_bids_by_collateral( deps, &deps.api.addr_canonicalize(collateral_token.as_str())?, start_after, limit, )?; Ok(BidsResponse { bids }) }
30.2
98
0.596276
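The core pricing step of `execute_bid` above is easy to miss among the message plumbing: the liquidator pays the oracle value of the collateral, discounted by the bid's premium rate, which is capped at the configured maximum. A plain-`f64` sketch of that arithmetic (the contract uses the fixed-point `Decimal256`/`Uint256` types; the function name and values here are illustrative):

fn required_stable(collateral_amount: f64, oracle_rate: f64, premium: f64, max_premium: f64) -> f64 {
    let collateral_value = collateral_amount * oracle_rate;
    // The liquidator's discount is the bid premium, capped at the
    // configured maximum premium rate.
    collateral_value * (1.0 - premium.min(max_premium))
}

fn main() {
    // 100 units of collateral priced at 2.0 stable each, with a 25% premium
    // under a 50% cap: the bidder pays 150 stable for collateral worth 200.
    assert_eq!(required_stable(100.0, 2.0, 0.25, 0.50), 150.0);
}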
1df544edf37b85ebab48105f40832e4b892f39aa
382
struct Solution;

impl Solution {
    /// `letters` is sorted in ascending order; return the smallest letter
    /// strictly greater than `target`, wrapping around to the first letter
    /// when no such letter exists.
    pub fn next_greatest_letter(letters: Vec<char>, target: char) -> char {
        letters
            .iter()
            .copied()
            .find(|&c| c > target)
            .unwrap_or(letters[0])
    }
}

fn main() {
    println!("{:?}", Solution::next_greatest_letter(vec!['c', 'f', 'j'], 'a'));
}
21.222222
79
0.505236
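Because `letters` is sorted, the linear scan above can also be answered in O(log n). A sketch using `slice::partition_point` (stable since Rust 1.52):

fn next_greatest_letter(letters: &[char], target: char) -> char {
    // Index of the first letter strictly greater than `target`;
    // `% len` wraps back to the first letter when none is greater.
    let i = letters.partition_point(|&c| c <= target);
    letters[i % letters.len()]
}

fn main() {
    assert_eq!(next_greatest_letter(&['c', 'f', 'j'], 'a'), 'c');
    assert_eq!(next_greatest_letter(&['c', 'f', 'j'], 'j'), 'c'); // wraparound
}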
bfab07725ba3adfc6bfa4c1642e4c3ce89c3d209
8,199
//! Code for building the standard library. use super::layout::Layout; use crate::core::compiler::{BuildContext, CompileKind, CompileMode, Context, FileFlavor, Unit}; use crate::core::profiles::UnitFor; use crate::core::resolver::ResolveOpts; use crate::core::{Dependency, PackageId, PackageSet, Resolve, SourceId, Workspace}; use crate::ops::{self, Packages}; use crate::util::errors::CargoResult; use crate::util::paths; use std::collections::{HashMap, HashSet}; use std::env; use std::path::PathBuf; /// Parse the `-Zbuild-std` flag. pub fn parse_unstable_flag(value: Option<&str>) -> Vec<String> { // This is a temporary hack until there is a more principled way to // declare dependencies in Cargo.toml. let value = value.unwrap_or("std"); let mut crates: HashSet<&str> = value.split(',').collect(); if crates.contains("std") { crates.insert("core"); crates.insert("alloc"); crates.insert("proc_macro"); crates.insert("panic_unwind"); crates.insert("compiler_builtins"); } else if crates.contains("core") { crates.insert("compiler_builtins"); } crates.into_iter().map(|s| s.to_string()).collect() } /// Resolve the standard library dependencies. pub fn resolve_std<'cfg>( ws: &Workspace<'cfg>, crates: &[String], ) -> CargoResult<(PackageSet<'cfg>, Resolve)> { let src_path = detect_sysroot_src_path(ws)?; let to_patch = [ "rustc-std-workspace-core", "rustc-std-workspace-alloc", "rustc-std-workspace-std", ]; let patches = to_patch .iter() .map(|&name| { let source_path = SourceId::for_path(&src_path.join("src").join("tools").join(name))?; let dep = Dependency::parse_no_deprecated(name, None, source_path)?; Ok(dep) }) .collect::<CargoResult<Vec<_>>>()?; let crates_io_url = crate::sources::CRATES_IO_INDEX.parse().unwrap(); let mut patch = HashMap::new(); patch.insert(crates_io_url, patches); let members = vec![ String::from("src/libstd"), String::from("src/libcore"), String::from("src/liballoc"), String::from("src/libtest"), ]; let ws_config = crate::core::WorkspaceConfig::Root(crate::core::WorkspaceRootConfig::new( &src_path, &Some(members), /*default_members*/ &None, /*exclude*/ &None, )); let virtual_manifest = crate::core::VirtualManifest::new( /*replace*/ Vec::new(), patch, ws_config, // Profiles are not used here, but we need something to pass in. ws.profiles().clone(), crate::core::Features::default(), ); let config = ws.config(); // This is a delicate hack. In order for features to resolve correctly, // the resolver needs to run a specific "current" member of the workspace. // Thus, in order to set the features for `std`, we need to set `libtest` // to be the "current" member. `libtest` is the root, and all other // standard library crates are dependencies from there. Since none of the // other crates need to alter their features, this should be fine, for // now. Perhaps in the future features will be decoupled from the resolver // and it will be easier to control feature selection. let current_manifest = src_path.join("src/libtest/Cargo.toml"); // TODO: Consider doing something to enforce --locked? Or to prevent the // lock file from being written, such as setting ephemeral. let mut std_ws = Workspace::new_virtual(src_path, current_manifest, virtual_manifest, config)?; // Don't require optional dependencies in this workspace, aka std's own // `[dev-dependencies]`. No need for us to generate a `Resolve` which has // those included because we'll never use them anyway. 
std_ws.set_require_optional_deps(false); // `test` is not in the default set because it is optional, but it needs // to be part of the resolve in case we do need it. let mut spec_pkgs = Vec::from(crates); spec_pkgs.push("test".to_string()); let spec = Packages::Packages(spec_pkgs); let specs = spec.to_package_id_specs(&std_ws)?; let features = vec!["panic-unwind".to_string(), "backtrace".to_string()]; // dev_deps setting shouldn't really matter here. let opts = ResolveOpts::new( /*dev_deps*/ false, &features, /*all_features*/ false, /*uses_default_features*/ true, ); let resolve = ops::resolve_ws_with_opts(&std_ws, opts, &specs)?; Ok((resolve.pkg_set, resolve.targeted_resolve)) } /// Generate a list of root `Unit`s for the standard library. /// /// The given slice of crate names is the root set. pub fn generate_std_roots<'a>( bcx: &BuildContext<'a, '_>, crates: &[String], std_resolve: &'a Resolve, kind: CompileKind, ) -> CargoResult<Vec<Unit<'a>>> { // Generate the root Units for the standard library. let std_ids = crates .iter() .map(|crate_name| std_resolve.query(crate_name)) .collect::<CargoResult<Vec<PackageId>>>()?; // Convert PackageId to Package. let std_pkgs = bcx.packages.get_many(std_ids)?; // Generate a list of Units. std_pkgs .into_iter() .map(|pkg| { let lib = pkg .targets() .iter() .find(|t| t.is_lib()) .expect("std has a lib"); let unit_for = UnitFor::new_normal(); // I don't think we need to bother with Check here, the difference // in time is minimal, and the difference in caching is // significant. let mode = CompileMode::Build; let profile = bcx.profiles.get_profile( pkg.package_id(), /*is_member*/ false, unit_for, mode, bcx.build_config.profile_kind.clone(), ); let features = std_resolve.features_sorted(pkg.package_id()); Ok(bcx.units.intern( pkg, lib, profile, kind, mode, features, /*is_std*/ true, )) }) .collect::<CargoResult<Vec<_>>>() } fn detect_sysroot_src_path(ws: &Workspace<'_>) -> CargoResult<PathBuf> { if let Some(s) = env::var_os("__CARGO_TESTS_ONLY_SRC_ROOT") { return Ok(s.into()); } // NOTE: This is temporary until we figure out how to acquire the source. // If we decide to keep the sysroot probe, then BuildConfig will need to // be restructured so that the TargetInfo is created earlier and passed // in, so we don't have this extra call to rustc. let rustc = ws.config().load_global_rustc(Some(ws))?; let output = rustc.process().arg("--print=sysroot").exec_with_output()?; let s = String::from_utf8(output.stdout) .map_err(|e| failure::format_err!("rustc didn't return utf8 output: {:?}", e))?; let sysroot = PathBuf::from(s.trim()); let src_path = sysroot.join("lib").join("rustlib").join("src").join("rust"); let lock = src_path.join("Cargo.lock"); if !lock.exists() { failure::bail!( "{:?} does not exist, unable to build with the standard \ library, try:\n rustup component add rust-src", lock ); } Ok(src_path) } /// Prepare the output directory for the local sysroot. pub fn prepare_sysroot(layout: &Layout) -> CargoResult<()> { if let Some(libdir) = layout.sysroot_libdir() { if libdir.exists() { paths::remove_dir_all(libdir)?; } paths::create_dir_all(libdir)?; } Ok(()) } /// Copy an artifact to the sysroot. 
pub fn add_sysroot_artifact<'a>( cx: &Context<'a, '_>, unit: &Unit<'a>, rmeta: bool, ) -> CargoResult<()> { let outputs = cx.outputs(unit)?; let outputs = outputs .iter() .filter(|output| output.flavor == FileFlavor::Linkable { rmeta }) .map(|output| &output.path); for path in outputs { let libdir = cx.files().layout(unit.kind).sysroot_libdir().unwrap(); let dst = libdir.join(path.file_name().unwrap()); paths::link_or_copy(path, dst)?; } Ok(()) }
39.229665
99
0.621051
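The `detect_sysroot_src_path` fallback above boils down to asking rustc where its sysroot is and checking for a rust-src checkout underneath it. The same probe outside Cargo's abstractions, as a minimal sketch (this omits the `__CARGO_TESTS_ONLY_SRC_ROOT` override the real code honors first):

use std::path::PathBuf;
use std::process::Command;

// Ask rustc for its sysroot, then derive where `rustup component add
// rust-src` places the standard library sources.
fn sysroot_src() -> Option<PathBuf> {
    let out = Command::new("rustc").arg("--print=sysroot").output().ok()?;
    let sysroot = PathBuf::from(String::from_utf8(out.stdout).ok()?.trim());
    let src = sysroot.join("lib").join("rustlib").join("src").join("rust");
    // The real code checks for Cargo.lock to confirm a usable checkout.
    if src.join("Cargo.lock").exists() { Some(src) } else { None }
}

fn main() {
    println!("{:?}", sysroot_src());
}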
16367bdaa67f664b4e67c7244816aa13fc34dba2
3,809
pub mod day_10 { pub fn input_1() -> Vec<u8> { let input = include_str!("../input.txt"); input .trim() .split(',') .map(|l| l.parse().unwrap()) .collect::<Vec<u8>>() } pub fn input_2() -> Vec<u8> { let input = include_str!("../input.txt"); input.trim().chars().map(|c| c as u8).collect::<Vec<u8>>() } fn rev<T>(start: usize, length: usize, b: &mut [T]) where T: Copy, { for j in 0..(length / 2) { let tmp = b[(start + j) % b.len()]; b[(start + j) % b.len()] = b[(start + length - j - 1) % b.len()]; b[(start + length - j - 1) % b.len()] = tmp; } } pub(crate) struct HashState { pub(crate) v: Vec<u8>, pub(crate) curr_pos: usize, pub(crate) skip_size: usize, } pub(crate) fn new_state(size: usize) -> HashState { HashState { v: (0..size).map(|i| i as u8).collect(), curr_pos: 0, skip_size: 0, } } pub(crate) fn execute_round(state: &mut HashState, input: &[u8]) { for &i in input { let i = i as usize; rev(state.curr_pos, i, &mut state.v); state.curr_pos = (state.curr_pos + i + state.skip_size) % state.v.len(); state.skip_size += 1; } } pub fn part_1(size: usize, input: &[u8]) -> u32 { let mut state = new_state(size); execute_round(&mut state, input); state.v[0] as u32 * state.v[1] as u32 } fn densify(v: &[u8]) -> Vec<u8> { v.chunks_exact(16) .map(|i| i.iter().fold(0, |x, y| x ^ y)) .collect() } // Convert a number from 0 to 15 into an ASCII hex char fn to_hex(i: u8) -> u8 { if i < 10 { i + b'0' } else { i - 10 + b'a' } } pub fn knot_hash_unsalted(bytes: &[u8]) -> Vec<u8> { let mut state = new_state(256); for _ in 0..64 { execute_round(&mut state, &bytes); } densify(&state.v) } pub fn knot_hash(bytes: &[u8]) -> Vec<u8> { let mut copy: Vec<u8> = bytes.to_vec(); copy.extend(vec![17, 31, 73, 47, 23]); knot_hash_unsalted(&copy) } pub fn to_hex_str(bytes: &[u8]) -> String { let mut answer = vec![0u8; 2 * bytes.len()]; for (i, b) in bytes.iter().enumerate() { answer[2 * i] = to_hex(b / 16); answer[2 * i + 1] = to_hex(b % 16); } String::from_utf8(answer).unwrap() } pub fn part_2(input: &[u8]) -> String { to_hex_str(&knot_hash(input)) } } #[cfg(test)] mod tests { use super::day_10::*; #[test] fn part1_known() { let mut state = new_state(5); execute_round(&mut state, &[3, 4, 1, 5]); assert_eq!(state.v, vec![3, 4, 2, 1, 0]); assert_eq!(state.skip_size, 4); assert_eq!(state.curr_pos, 4); } #[test] fn part2_known() { assert_eq!( to_hex_str(&knot_hash("".as_bytes())), "a2582a3a0e66e6e86e3812dcb672a272" ); assert_eq!( to_hex_str(&knot_hash("AoC 2017".as_bytes())), "33efeb34ea91902bb2f59c9920caa6cd" ); assert_eq!( to_hex_str(&knot_hash("1,2,3".as_bytes())), "3efbe78a8d82f29979031a4aa0b16a9d" ); assert_eq!( to_hex_str(&knot_hash("1,2,4".as_bytes())), "63960835bcdc130f0b66d7ff4f6a5a8e" ); } #[test] fn test_day_10() { let input = input_1(); assert_eq!(part_1(256, &input), 4114); let input = input_2(); assert_eq!(part_2(&input), "2f8c3d2100fdd57cec130d928b0fd2dd"); } }
27.014184
84
0.483854
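The heart of the knot hash above is `rev`, which reverses a span of a circular buffer: both indices wrap modulo the buffer length, so a span may straddle the end of the slice. A standalone sketch of the same operation using `slice::swap`:

fn rev_circular<T>(start: usize, length: usize, b: &mut [T]) {
    for j in 0..length / 2 {
        // Both indices wrap around the end of the buffer.
        b.swap((start + j) % b.len(), (start + length - j - 1) % b.len());
    }
}

fn main() {
    let mut v = [0, 1, 2, 3, 4];
    // Reverses the wrapped span covering positions 3, 4, 0, 1.
    rev_circular(3, 4, &mut v);
    assert_eq!(v, [4, 3, 2, 1, 0]);
    println!("{:?}", v);
}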
d50790f37a54594df1877ab90b8eca4bba667fa9
3,177
#[doc = "Register `ISI_PDECF` reader"] pub struct R(crate::R<ISI_PDECF_SPEC>); impl core::ops::Deref for R { type Target = crate::R<ISI_PDECF_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<ISI_PDECF_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<ISI_PDECF_SPEC>) -> Self { R(reader) } } #[doc = "Register `ISI_PDECF` writer"] pub struct W(crate::W<ISI_PDECF_SPEC>); impl core::ops::Deref for W { type Target = crate::W<ISI_PDECF_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<crate::W<ISI_PDECF_SPEC>> for W { #[inline(always)] fn from(writer: crate::W<ISI_PDECF_SPEC>) -> Self { W(writer) } } #[doc = "Field `DEC_FACTOR` reader - Decimation Factor"] pub struct DEC_FACTOR_R(crate::FieldReader<u8, u8>); impl DEC_FACTOR_R { #[inline(always)] pub(crate) fn new(bits: u8) -> Self { DEC_FACTOR_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for DEC_FACTOR_R { type Target = crate::FieldReader<u8, u8>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `DEC_FACTOR` writer - Decimation Factor"] pub struct DEC_FACTOR_W<'a> { w: &'a mut W, } impl<'a> DEC_FACTOR_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !0xff) | (value as u32 & 0xff); self.w } } impl R { #[doc = "Bits 0:7 - Decimation Factor"] #[inline(always)] pub fn dec_factor(&self) -> DEC_FACTOR_R { DEC_FACTOR_R::new((self.bits & 0xff) as u8) } } impl W { #[doc = "Bits 0:7 - Decimation Factor"] #[inline(always)] pub fn dec_factor(&mut self) -> DEC_FACTOR_W { DEC_FACTOR_W { w: self } } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "ISI Preview Decimation Factor Register\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [isi_pdecf](index.html) module"] pub struct ISI_PDECF_SPEC; impl crate::RegisterSpec for ISI_PDECF_SPEC { type Ux = u32; } #[doc = "`read()` method returns [isi_pdecf::R](R) reader structure"] impl crate::Readable for ISI_PDECF_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [isi_pdecf::W](W) writer structure"] impl crate::Writable for ISI_PDECF_SPEC { type Writer = W; } #[doc = "`reset()` method sets ISI_PDECF to value 0"] impl crate::Resettable for ISI_PDECF_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
30.548077
428
0.614731
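The `DEC_FACTOR_W::bits` writer above is a plain mask-and-or update of bits 0..8 of the register value. Pulled out as a hypothetical free function (the name and test values are illustrative), the arithmetic looks like this:

// Clear the low 8 bits of the register value, then OR in the new field.
fn set_dec_factor(reg: u32, value: u8) -> u32 {
    (reg & !0xff) | (value as u32 & 0xff)
}

fn main() {
    assert_eq!(set_dec_factor(0xdead_beef, 0x42), 0xdead_be42);
    println!("{:#010x}", set_dec_factor(0xdead_beef, 0x42));
}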
ac8694cfc431e015c0bcae92025dfbbea405466b
66,789
use std::cmp; use std::collections::HashMap; use std::convert::TryFrom; use std::io::{self, Read, Seek}; use crate::{ bytecast, ColorType, TiffError, TiffFormatError, TiffResult, TiffUnsupportedError, UsageError, }; use self::ifd::Directory; use crate::tags::{ CompressionMethod, PhotometricInterpretation, Predictor, SampleFormat, Tag, Type, }; use self::stream::{ ByteOrder, DeflateReader, EndianReader, JpegReader, LZWReader, PackBitsReader, SmartReader, }; pub mod ifd; mod stream; /// Result of a decoding process #[derive(Debug)] pub enum DecodingResult { /// A vector of unsigned bytes U8(Vec<u8>), /// A vector of unsigned words U16(Vec<u16>), /// A vector of 32 bit unsigned ints U32(Vec<u32>), /// A vector of 64 bit unsigned ints U64(Vec<u64>), /// A vector of 32 bit IEEE floats F32(Vec<f32>), /// A vector of 64 bit IEEE floats F64(Vec<f64>), /// A vector of 8 bit signed ints I8(Vec<i8>), /// A vector of 16 bit signed ints I16(Vec<i16>), /// A vector of 32 bit signed ints I32(Vec<i32>), /// A vector of 64 bit signed ints I64(Vec<i64>), } impl DecodingResult { fn new_u8(size: usize, limits: &Limits) -> TiffResult<DecodingResult> { if size > limits.decoding_buffer_size { Err(TiffError::LimitsExceeded) } else { Ok(DecodingResult::U8(vec![0; size])) } } fn new_u16(size: usize, limits: &Limits) -> TiffResult<DecodingResult> { if size > limits.decoding_buffer_size / 2 { Err(TiffError::LimitsExceeded) } else { Ok(DecodingResult::U16(vec![0; size])) } } fn new_u32(size: usize, limits: &Limits) -> TiffResult<DecodingResult> { if size > limits.decoding_buffer_size / 4 { Err(TiffError::LimitsExceeded) } else { Ok(DecodingResult::U32(vec![0; size])) } } fn new_u64(size: usize, limits: &Limits) -> TiffResult<DecodingResult> { if size > limits.decoding_buffer_size / 8 { Err(TiffError::LimitsExceeded) } else { Ok(DecodingResult::U64(vec![0; size])) } } fn new_f32(size: usize, limits: &Limits) -> TiffResult<DecodingResult> { if size > limits.decoding_buffer_size / std::mem::size_of::<f32>() { Err(TiffError::LimitsExceeded) } else { Ok(DecodingResult::F32(vec![0.0; size])) } } fn new_f64(size: usize, limits: &Limits) -> TiffResult<DecodingResult> { if size > limits.decoding_buffer_size / std::mem::size_of::<f64>() { Err(TiffError::LimitsExceeded) } else { Ok(DecodingResult::F64(vec![0.0; size])) } } fn new_i8(size: usize, limits: &Limits) -> TiffResult<DecodingResult> { if size > limits.decoding_buffer_size / std::mem::size_of::<i8>() { Err(TiffError::LimitsExceeded) } else { Ok(DecodingResult::I8(vec![0; size])) } } fn new_i16(size: usize, limits: &Limits) -> TiffResult<DecodingResult> { if size > limits.decoding_buffer_size / 2 { Err(TiffError::LimitsExceeded) } else { Ok(DecodingResult::I16(vec![0; size])) } } fn new_i32(size: usize, limits: &Limits) -> TiffResult<DecodingResult> { if size > limits.decoding_buffer_size / 4 { Err(TiffError::LimitsExceeded) } else { Ok(DecodingResult::I32(vec![0; size])) } } fn new_i64(size: usize, limits: &Limits) -> TiffResult<DecodingResult> { if size > limits.decoding_buffer_size / 8 { Err(TiffError::LimitsExceeded) } else { Ok(DecodingResult::I64(vec![0; size])) } } pub fn as_buffer(&mut self, start: usize) -> DecodingBuffer { match *self { DecodingResult::U8(ref mut buf) => DecodingBuffer::U8(&mut buf[start..]), DecodingResult::U16(ref mut buf) => DecodingBuffer::U16(&mut buf[start..]), DecodingResult::U32(ref mut buf) => DecodingBuffer::U32(&mut buf[start..]), DecodingResult::U64(ref mut buf) => DecodingBuffer::U64(&mut buf[start..]), DecodingResult::F32(ref mut 
buf) => DecodingBuffer::F32(&mut buf[start..]), DecodingResult::F64(ref mut buf) => DecodingBuffer::F64(&mut buf[start..]), DecodingResult::I8(ref mut buf) => DecodingBuffer::I8(&mut buf[start..]), DecodingResult::I16(ref mut buf) => DecodingBuffer::I16(&mut buf[start..]), DecodingResult::I32(ref mut buf) => DecodingBuffer::I32(&mut buf[start..]), DecodingResult::I64(ref mut buf) => DecodingBuffer::I64(&mut buf[start..]), } } } // A buffer for image decoding pub enum DecodingBuffer<'a> { /// A slice of unsigned bytes U8(&'a mut [u8]), /// A slice of unsigned words U16(&'a mut [u16]), /// A slice of 32 bit unsigned ints U32(&'a mut [u32]), /// A slice of 64 bit unsigned ints U64(&'a mut [u64]), /// A slice of 32 bit IEEE floats F32(&'a mut [f32]), /// A slice of 64 bit IEEE floats F64(&'a mut [f64]), /// A slice of 8 bits signed ints I8(&'a mut [i8]), /// A slice of 16 bits signed ints I16(&'a mut [i16]), /// A slice of 32 bits signed ints I32(&'a mut [i32]), /// A slice of 64 bits signed ints I64(&'a mut [i64]), } impl<'a> DecodingBuffer<'a> { fn len(&self) -> usize { match *self { DecodingBuffer::U8(ref buf) => buf.len(), DecodingBuffer::U16(ref buf) => buf.len(), DecodingBuffer::U32(ref buf) => buf.len(), DecodingBuffer::U64(ref buf) => buf.len(), DecodingBuffer::F32(ref buf) => buf.len(), DecodingBuffer::F64(ref buf) => buf.len(), DecodingBuffer::I8(ref buf) => buf.len(), DecodingBuffer::I16(ref buf) => buf.len(), DecodingBuffer::I32(ref buf) => buf.len(), DecodingBuffer::I64(ref buf) => buf.len(), } } fn byte_len(&self) -> usize { match *self { DecodingBuffer::U8(_) => 1, DecodingBuffer::U16(_) => 2, DecodingBuffer::U32(_) => 4, DecodingBuffer::U64(_) => 8, DecodingBuffer::F32(_) => 4, DecodingBuffer::F64(_) => 8, DecodingBuffer::I8(_) => 1, DecodingBuffer::I16(_) => 2, DecodingBuffer::I32(_) => 4, DecodingBuffer::I64(_) => 8, } } fn copy<'b>(&'b mut self) -> DecodingBuffer<'b> where 'a: 'b, { match *self { DecodingBuffer::U8(ref mut buf) => DecodingBuffer::U8(buf), DecodingBuffer::U16(ref mut buf) => DecodingBuffer::U16(buf), DecodingBuffer::U32(ref mut buf) => DecodingBuffer::U32(buf), DecodingBuffer::U64(ref mut buf) => DecodingBuffer::U64(buf), DecodingBuffer::F32(ref mut buf) => DecodingBuffer::F32(buf), DecodingBuffer::F64(ref mut buf) => DecodingBuffer::F64(buf), DecodingBuffer::I8(ref mut buf) => DecodingBuffer::I8(buf), DecodingBuffer::I16(ref mut buf) => DecodingBuffer::I16(buf), DecodingBuffer::I32(ref mut buf) => DecodingBuffer::I32(buf), DecodingBuffer::I64(ref mut buf) => DecodingBuffer::I64(buf), } } fn prefix<'b>(&'b mut self, new_length: usize) -> DecodingBuffer<'b> where 'a: 'b, { match *self { DecodingBuffer::U8(ref mut buf) => DecodingBuffer::U8(&mut buf[..new_length]), DecodingBuffer::U16(ref mut buf) => DecodingBuffer::U16(&mut buf[..new_length]), DecodingBuffer::U32(ref mut buf) => DecodingBuffer::U32(&mut buf[..new_length]), DecodingBuffer::U64(ref mut buf) => DecodingBuffer::U64(&mut buf[..new_length]), DecodingBuffer::F32(ref mut buf) => DecodingBuffer::F32(&mut buf[..new_length]), DecodingBuffer::F64(ref mut buf) => DecodingBuffer::F64(&mut buf[..new_length]), DecodingBuffer::I8(ref mut buf) => DecodingBuffer::I8(&mut buf[..new_length]), DecodingBuffer::I16(ref mut buf) => DecodingBuffer::I16(&mut buf[..new_length]), DecodingBuffer::I32(ref mut buf) => DecodingBuffer::I32(&mut buf[..new_length]), DecodingBuffer::I64(ref mut buf) => DecodingBuffer::I64(&mut buf[..new_length]), } } } #[derive(Debug)] struct StripDecodeState { strip_index: usize, strip_offsets: 
Vec<u64>,
    strip_bytes: Vec<u64>,
}

#[derive(Debug)]
/// Computed values useful for tile decoding
struct TileAttributes {
    tile_width: usize,
    tile_length: usize,
    tiles_down: usize,
    tiles_across: usize,
    /// Length of padding for rightmost tiles in pixels
    padding_right: usize,
    /// Length of padding for bottommost tile in pixels
    padding_down: usize,
    /// Sample count of one tile
    tile_samples: usize,
    /// Sample count of one row of one tile
    row_samples: usize,
    /// Sample count of one row of tiles
    tile_strip_samples: usize,
}

impl TileAttributes {
    /// Returns the tile offset in the result buffer, counted in samples
    fn get_offset(&self, tile: usize) -> usize {
        let row = tile / self.tiles_across;
        let column = tile % self.tiles_across;
        (row * self.tile_strip_samples) + (column * self.row_samples)
    }
    fn get_padding(&self, tile: usize) -> (usize, usize) {
        let row = tile / self.tiles_across;
        let column = tile % self.tiles_across;
        let padding_right = if column == self.tiles_across - 1 {
            self.padding_right
        } else {
            0
        };
        let padding_down = if row == self.tiles_down - 1 {
            self.padding_down
        } else {
            0
        };
        (padding_right, padding_down)
    }
}

#[derive(Debug)]
/// Stateful variables for tile decoding
struct TileDecodeState {
    current_tile: usize,
    tile_offsets: Vec<u64>,
    tile_bytes: Vec<u64>,
    /// Buffer used for skipping horizontal padding
    padding_buffer: Vec<u8>,
    /// Pixel width of one row of the decoding result (tile / whole image)
    result_width: usize,
}

#[derive(Debug, Copy, Clone, PartialEq)]
/// Chunk type of the internal representation
pub enum ChunkType {
    Strip,
    Tile,
}

/// Decoding limits
#[derive(Clone, Debug)]
pub struct Limits {
    /// The maximum size of any `DecodingResult` in bytes, the default is
    /// 256MiB. If the entire image is decoded at once, then this will
    /// be the maximum size of the image. If it is decoded one strip at a
    /// time, this will be the maximum size of a strip.
    pub decoding_buffer_size: usize,
    /// The maximum size of any ifd value in bytes, the default is
    /// 1MiB.
    pub ifd_value_size: usize,
    /// Maximum size for an intermediate buffer, which may be used to limit the amount of data
    /// read per segment even if the entire image is decoded at once.
    pub intermediate_buffer_size: usize,
    /// The purpose of this is to prevent all the fields of the struct from
    /// being public, as this would make adding new fields a major version
    /// bump.
    _non_exhaustive: (),
}

impl Limits {
    /// A configuration that does not impose any limits.
    ///
    /// This is a good start if the caller only wants to impose selective limits, contrary to the
    /// default limits, which allow selectively disabling limits.
    ///
    /// Note that this configuration is likely to crash on excessively large images since,
    /// naturally, the machine running the program does not have infinite memory.
pub fn unlimited() -> Limits { Limits { decoding_buffer_size: usize::max_value(), ifd_value_size: usize::max_value(), intermediate_buffer_size: usize::max_value(), _non_exhaustive: (), } } } impl Default for Limits { fn default() -> Limits { Limits { decoding_buffer_size: 256 * 1024 * 1024, intermediate_buffer_size: 128 * 1024 * 1024, ifd_value_size: 1024 * 1024, _non_exhaustive: (), } } } /// The representation of a TIFF decoder /// /// Currently does not support decoding of interlaced images #[derive(Debug)] pub struct Decoder<R> where R: Read + Seek, { reader: SmartReader<R>, byte_order: ByteOrder, bigtiff: bool, limits: Limits, next_ifd: Option<u64>, ifd: Option<Directory>, width: u32, height: u32, bits_per_sample: Vec<u8>, samples: u8, sample_format: Vec<SampleFormat>, photometric_interpretation: PhotometricInterpretation, compression_method: CompressionMethod, chunk_type: ChunkType, strip_decoder: Option<StripDecodeState>, tile_decoder: Option<TileDecodeState>, tile_attributes: Option<TileAttributes>, } trait Wrapping { fn wrapping_add(&self, other: Self) -> Self; } impl Wrapping for u8 { fn wrapping_add(&self, other: Self) -> Self { u8::wrapping_add(*self, other) } } impl Wrapping for u16 { fn wrapping_add(&self, other: Self) -> Self { u16::wrapping_add(*self, other) } } impl Wrapping for u32 { fn wrapping_add(&self, other: Self) -> Self { u32::wrapping_add(*self, other) } } impl Wrapping for u64 { fn wrapping_add(&self, other: Self) -> Self { u64::wrapping_add(*self, other) } } impl Wrapping for i8 { fn wrapping_add(&self, other: Self) -> Self { i8::wrapping_add(*self, other) } } impl Wrapping for i16 { fn wrapping_add(&self, other: Self) -> Self { i16::wrapping_add(*self, other) } } impl Wrapping for i32 { fn wrapping_add(&self, other: Self) -> Self { i32::wrapping_add(*self, other) } } impl Wrapping for i64 { fn wrapping_add(&self, other: Self) -> Self { i64::wrapping_add(*self, other) } } fn rev_hpredict_nsamp<T>( image: &mut [T], size: (u32, u32), // Size of the block img_width: usize, // Width of the image (this distinction is needed for tiles) samples: usize, ) -> TiffResult<()> where T: Copy + Wrapping, { let width = usize::try_from(size.0)?; let height = usize::try_from(size.1)?; for row in 0..height { for col in samples..width * samples { let prev_pixel = image[(row * img_width * samples + col - samples)]; let pixel = &mut image[(row * img_width * samples + col)]; *pixel = pixel.wrapping_add(prev_pixel); } } Ok(()) } fn rev_hpredict( image: DecodingBuffer, size: (u32, u32), img_width: usize, color_type: ColorType, ) -> TiffResult<()> { // TODO: use bits_per_sample.len() after implementing type 3 predictor let samples = match color_type { ColorType::Gray(8) | ColorType::Gray(16) | ColorType::Gray(32) | ColorType::Gray(64) => 1, ColorType::RGB(8) | ColorType::RGB(16) | ColorType::RGB(32) | ColorType::RGB(64) => 3, ColorType::RGBA(8) | ColorType::RGBA(16) | ColorType::RGBA(32) | ColorType::RGBA(64) | ColorType::CMYK(8) | ColorType::CMYK(16) | ColorType::CMYK(32) | ColorType::CMYK(64) => 4, _ => { return Err(TiffError::UnsupportedError( TiffUnsupportedError::HorizontalPredictor(color_type), )) } }; match image { DecodingBuffer::U8(buf) => { rev_hpredict_nsamp(buf, size, img_width, samples)?; } DecodingBuffer::U16(buf) => { rev_hpredict_nsamp(buf, size, img_width, samples)?; } DecodingBuffer::U32(buf) => { rev_hpredict_nsamp(buf, size, img_width, samples)?; } DecodingBuffer::U64(buf) => { rev_hpredict_nsamp(buf, size, img_width, samples)?; } DecodingBuffer::F32(_buf) => { // 
FIXME: check how this is defined. // See issue #89. // rev_hpredict_nsamp(buf, size, img_width,samples)?; return Err(TiffError::UnsupportedError( TiffUnsupportedError::HorizontalPredictor(color_type), )); } DecodingBuffer::F64(_buf) => { //FIXME: check how this is defined. // See issue #89. // rev_hpredict_nsamp(buf, size, img_width,samples)?; return Err(TiffError::UnsupportedError( TiffUnsupportedError::HorizontalPredictor(color_type), )); } DecodingBuffer::I8(buf) => { rev_hpredict_nsamp(buf, size, img_width, samples)?; } DecodingBuffer::I16(buf) => { rev_hpredict_nsamp(buf, size, img_width, samples)?; } DecodingBuffer::I32(buf) => { rev_hpredict_nsamp(buf, size, img_width, samples)?; } DecodingBuffer::I64(buf) => { rev_hpredict_nsamp(buf, size, img_width, samples)?; } } Ok(()) } fn process_photometry_u64(buffer: &mut [u64]) { for datum in buffer.iter_mut() { *datum = 0xffff_ffff_ffff_ffff - *datum } } fn process_photometry_u32(buffer: &mut [u32]) { for datum in buffer.iter_mut() { *datum = 0xffff_ffff - *datum } } fn process_photometry_u16(buffer: &mut [u16]) { for datum in buffer.iter_mut() { *datum = 0xffff - *datum } } fn process_photometry_i16(buffer: &mut [i16]) { for datum in buffer.iter_mut() { *datum = !*datum; } } fn process_photometry_i8(buffer: &mut [i8]) { for datum in buffer.iter_mut() { *datum = !*datum; } } fn process_photometry_u8(buffer: &mut [u8]) { for byte in buffer.iter_mut() { *byte = 0xff - *byte } } fn process_photometry_f64(buffer: &mut [f64]) { for datum in buffer.iter_mut() { // FIXME: assumes [0, 1) range for floats *datum = 1.0 - *datum } } fn process_photometry_f32(buffer: &mut [f32]) { for datum in buffer.iter_mut() { // FIXME: assumes [0, 1) range for floats *datum = 1.0 - *datum } } impl<R: Read + Seek> Decoder<R> { /// Create a new decoder that decodes from the stream ```r``` pub fn new(r: R) -> TiffResult<Decoder<R>> { Decoder { reader: SmartReader::wrap(r, ByteOrder::LittleEndian), byte_order: ByteOrder::LittleEndian, bigtiff: false, limits: Default::default(), next_ifd: None, ifd: None, width: 0, height: 0, bits_per_sample: vec![1], samples: 1, sample_format: vec![SampleFormat::Uint], photometric_interpretation: PhotometricInterpretation::BlackIsZero, compression_method: CompressionMethod::None, chunk_type: ChunkType::Strip, strip_decoder: None, tile_decoder: None, tile_attributes: None, } .init() } pub fn with_limits(mut self, limits: Limits) -> Decoder<R> { self.limits = limits; self } pub fn dimensions(&mut self) -> TiffResult<(u32, u32)> { Ok((self.width, self.height)) } pub fn colortype(&mut self) -> TiffResult<ColorType> { match self.photometric_interpretation { PhotometricInterpretation::RGB => match self.bits_per_sample[..] { [r, g, b] if [r, r] == [g, b] => Ok(ColorType::RGB(r)), [r, g, b, a] if [r, r, r] == [g, b, a] => Ok(ColorType::RGBA(r)), // FIXME: We should _ignore_ other components. In particular: // > Beware of extra components. Some TIFF files may have more components per pixel // than you think. A Baseline TIFF reader must skip over them gracefully,using the // values of the SamplesPerPixel and BitsPerSample fields. // > -- TIFF 6.0 Specification, Section 7, Additional Baseline requirements. _ => Err(TiffError::UnsupportedError( TiffUnsupportedError::InterpretationWithBits( self.photometric_interpretation, self.bits_per_sample.clone(), ), )), }, PhotometricInterpretation::CMYK => match self.bits_per_sample[..] 
{ [c, m, y, k] if [c, c, c] == [m, y, k] => Ok(ColorType::CMYK(c)), _ => Err(TiffError::UnsupportedError( TiffUnsupportedError::InterpretationWithBits( self.photometric_interpretation, self.bits_per_sample.clone(), ), )), }, PhotometricInterpretation::BlackIsZero | PhotometricInterpretation::WhiteIsZero if self.bits_per_sample.len() == 1 => { Ok(ColorType::Gray(self.bits_per_sample[0])) } // TODO: this is bad we should not fail at this point _ => Err(TiffError::UnsupportedError( TiffUnsupportedError::InterpretationWithBits( self.photometric_interpretation, self.bits_per_sample.clone(), ), )), } } fn read_header(&mut self) -> TiffResult<()> { let mut endianess = Vec::with_capacity(2); self.reader.by_ref().take(2).read_to_end(&mut endianess)?; match &*endianess { b"II" => { self.byte_order = ByteOrder::LittleEndian; self.reader.byte_order = ByteOrder::LittleEndian; } b"MM" => { self.byte_order = ByteOrder::BigEndian; self.reader.byte_order = ByteOrder::BigEndian; } _ => { return Err(TiffError::FormatError( TiffFormatError::TiffSignatureNotFound, )) } } match self.read_short()? { 42 => self.bigtiff = false, 43 => { self.bigtiff = true; // Read bytesize of offsets (in bigtiff it's alway 8 but provide a way to move to 16 some day) if self.read_short()? != 8 { return Err(TiffError::FormatError( TiffFormatError::TiffSignatureNotFound, )); } // This constant should always be 0 if self.read_short()? != 0 { return Err(TiffError::FormatError( TiffFormatError::TiffSignatureNotFound, )); } } _ => { return Err(TiffError::FormatError( TiffFormatError::TiffSignatureInvalid, )) } } self.next_ifd = match self.read_ifd_offset()? { 0 => None, n => Some(n), }; Ok(()) } /// Initializes the decoder. pub fn init(mut self) -> TiffResult<Decoder<R>> { self.read_header()?; self.next_image()?; Ok(self) } /// Reads in the next image. /// If there is no further image in the TIFF file a format error is returned. /// To determine whether there are more images call `TIFFDecoder::more_images` instead. pub fn next_image(&mut self) -> TiffResult<()> { self.ifd = Some(self.read_ifd()?); self.width = self.get_tag_u32(Tag::ImageWidth)?; self.height = self.get_tag_u32(Tag::ImageLength)?; self.strip_decoder = None; self.photometric_interpretation = self .find_tag_unsigned(Tag::PhotometricInterpretation)? .and_then(PhotometricInterpretation::from_u16) .ok_or(TiffUnsupportedError::UnknownInterpretation)?; if let Some(val) = self.find_tag_unsigned(Tag::Compression)? { self.compression_method = CompressionMethod::from_u16(val) .ok_or(TiffUnsupportedError::UnknownCompressionMethod)?; } if let Some(val) = self.find_tag_unsigned(Tag::SamplesPerPixel)? { self.samples = val; } if let Some(vals) = self.find_tag_unsigned_vec(Tag::SampleFormat)? { self.sample_format = vals .into_iter() .map(SampleFormat::from_u16_exhaustive) .collect(); // TODO: for now, only homogenous formats across samples are supported. if !self.sample_format.windows(2).all(|s| s[0] == s[1]) { return Err(TiffUnsupportedError::UnsupportedSampleFormat( self.sample_format.clone(), ) .into()); } } match self.samples { 1 | 3 | 4 => { if let Some(val) = self.find_tag_unsigned_vec(Tag::BitsPerSample)? 
{ self.bits_per_sample = val; } } _ => return Err(TiffUnsupportedError::UnsupportedSampleDepth(self.samples).into()), } self.chunk_type = match ( self.get_tag_u32(Tag::RowsPerStrip), self.get_tag_u32(Tag::TileWidth), self.get_tag_u32(Tag::TileLength), ) { (Ok(_), Err(_), Err(_)) => ChunkType::Strip, (Err(_), Ok(_), Ok(_)) => ChunkType::Tile, // TODO: The spec says not to use both strip-oriented fields and tile-oriented fields. // We can relax this later if it becomes a problem _ => return Err(TiffError::FormatError(TiffFormatError::Format( String::from( "Neither strips nor tiles were found or both were used in the same file", ), ))), }; Ok(()) } /// Returns `true` if there is at least one more image available. pub fn more_images(&self) -> bool { self.next_ifd.is_some() } /// Returns the byte_order pub fn byte_order(&self) -> ByteOrder { self.byte_order } #[inline] pub fn read_ifd_offset(&mut self) -> Result<u64, io::Error> { if self.bigtiff { self.read_long8() } else { self.read_long().map(u64::from) } } /// Reads a TIFF byte value #[inline] pub fn read_byte(&mut self) -> Result<u8, io::Error> { let mut buf = [0; 1]; self.reader.read_exact(&mut buf)?; Ok(buf[0]) } /// Reads a TIFF short value #[inline] pub fn read_short(&mut self) -> Result<u16, io::Error> { self.reader.read_u16() } /// Reads a TIFF sshort value #[inline] pub fn read_sshort(&mut self) -> Result<i16, io::Error> { self.reader.read_i16() } /// Reads a TIFF long value #[inline] pub fn read_long(&mut self) -> Result<u32, io::Error> { self.reader.read_u32() } /// Reads a TIFF slong value #[inline] pub fn read_slong(&mut self) -> Result<i32, io::Error> { self.reader.read_i32() } /// Reads a TIFF float value #[inline] pub fn read_float(&mut self) -> Result<f32, io::Error> { self.reader.read_f32() } /// Reads a TIFF double value #[inline] pub fn read_double(&mut self) -> Result<f64, io::Error> { self.reader.read_f64() } #[inline] pub fn read_long8(&mut self) -> Result<u64, io::Error> { self.reader.read_u64() } #[inline] pub fn read_slong8(&mut self) -> Result<i64, io::Error> { self.reader.read_i64() } /// Reads a string #[inline] pub fn read_string(&mut self, length: usize) -> TiffResult<String> { let mut out = vec![0; length]; self.reader.read_exact(&mut out)?; // Strings may be null-terminated, so we trim anything downstream of the null byte if let Some(first) = out.iter().position(|&b| b == 0) { out.truncate(first); } Ok(String::from_utf8(out)?) } /// Reads a TIFF IFA offset/value field #[inline] pub fn read_offset(&mut self) -> TiffResult<[u8; 4]> { if self.bigtiff { return Err(TiffError::FormatError( TiffFormatError::InconsistentSizesEncountered, )); } let mut val = [0; 4]; self.reader.read_exact(&mut val)?; Ok(val) } /// Reads a TIFF IFA offset/value field #[inline] pub fn read_offset_u64(&mut self) -> Result<[u8; 8], io::Error> { let mut val = [0; 8]; self.reader.read_exact(&mut val)?; Ok(val) } /// Moves the cursor to the specified offset #[inline] pub fn goto_offset(&mut self, offset: u32) -> io::Result<()> { self.goto_offset_u64(offset.into()) } #[inline] pub fn goto_offset_u64(&mut self, offset: u64) -> io::Result<()> { self.reader.seek(io::SeekFrom::Start(offset)).map(|_| ()) } /// Reads a IFD entry. // An IFD entry has four fields: // // Tag 2 bytes // Type 2 bytes // Count 4 bytes // Value 4 bytes either a pointer the value itself fn read_entry(&mut self) -> TiffResult<Option<(Tag, ifd::Entry)>> { let tag = Tag::from_u16_exhaustive(self.read_short()?); let type_ = match Type::from_u16(self.read_short()?) 
{ Some(t) => t, None => { // Unknown type. Skip this entry according to spec. self.read_long()?; self.read_long()?; return Ok(None); } }; let entry = if self.bigtiff { ifd::Entry::new_u64(type_, self.read_long8()?, self.read_offset_u64()?) } else { ifd::Entry::new(type_, self.read_long()?, self.read_offset()?) }; Ok(Some((tag, entry))) } /// Reads the next IFD fn read_ifd(&mut self) -> TiffResult<Directory> { let mut dir: Directory = HashMap::new(); match self.next_ifd { None => { return Err(TiffError::FormatError( TiffFormatError::ImageFileDirectoryNotFound, )) } Some(offset) => self.goto_offset_u64(offset)?, } let num_tags = if self.bigtiff { self.read_long8()? } else { self.read_short()?.into() }; for _ in 0..num_tags { let (tag, entry) = match self.read_entry()? { Some(val) => val, None => { continue; } // Unknown data type in tag, skip }; dir.insert(tag, entry); } self.next_ifd = match self.read_ifd_offset()? { 0 => None, n => Some(n), }; Ok(dir) } /// Tries to retrieve a tag. /// Return `Ok(None)` if the tag is not present. pub fn find_tag(&mut self, tag: Tag) -> TiffResult<Option<ifd::Value>> { let entry = match self.ifd.as_ref().unwrap().get(&tag) { None => return Ok(None), Some(entry) => entry.clone(), }; let limits = self.limits.clone(); Ok(Some(entry.val(&limits, self)?)) } /// Tries to retrieve a tag and convert it to the desired unsigned type. pub fn find_tag_unsigned<T: TryFrom<u64>>(&mut self, tag: Tag) -> TiffResult<Option<T>> { self.find_tag(tag)? .map(|v| v.into_u64()) .transpose()? .map(|value| { T::try_from(value).map_err(|_| TiffFormatError::InvalidTagValueType(tag).into()) }) .transpose() } /// Tries to retrieve a vector of all a tag's values and convert them to /// the desired unsigned type. pub fn find_tag_unsigned_vec<T: TryFrom<u64>>( &mut self, tag: Tag, ) -> TiffResult<Option<Vec<T>>> { self.find_tag(tag)? .map(|v| v.into_u64_vec()) .transpose()? .map(|v| { v.into_iter() .map(|u| { T::try_from(u).map_err(|_| TiffFormatError::InvalidTagValueType(tag).into()) }) .collect() }) .transpose() } /// Tries to retrieve a tag and convert it to the desired unsigned type. /// Returns an error if the tag is not present. pub fn get_tag_unsigned<T: TryFrom<u64>>(&mut self, tag: Tag) -> TiffResult<T> { self.find_tag_unsigned(tag)? .ok_or_else(|| TiffFormatError::RequiredTagNotFound(tag).into()) } /// Tries to retrieve a tag. /// Returns an error if the tag is not present pub fn get_tag(&mut self, tag: Tag) -> TiffResult<ifd::Value> { match self.find_tag(tag)? { Some(val) => Ok(val), None => Err(TiffError::FormatError( TiffFormatError::RequiredTagNotFound(tag), )), } } /// Tries to retrieve a tag and convert it to the desired type. pub fn get_tag_u32(&mut self, tag: Tag) -> TiffResult<u32> { self.get_tag(tag)?.into_u32() } pub fn get_tag_u64(&mut self, tag: Tag) -> TiffResult<u64> { self.get_tag(tag)?.into_u64() } /// Tries to retrieve a tag and convert it to the desired type. pub fn get_tag_f32(&mut self, tag: Tag) -> TiffResult<f32> { self.get_tag(tag)?.into_f32() } /// Tries to retrieve a tag and convert it to the desired type. pub fn get_tag_f64(&mut self, tag: Tag) -> TiffResult<f64> { self.get_tag(tag)?.into_f64() } /// Tries to retrieve a tag and convert it to the desired type. 
pub fn get_tag_u32_vec(&mut self, tag: Tag) -> TiffResult<Vec<u32>> { self.get_tag(tag)?.into_u32_vec() } pub fn get_tag_u16_vec(&mut self, tag: Tag) -> TiffResult<Vec<u16>> { self.get_tag(tag)?.into_u16_vec() } pub fn get_tag_u64_vec(&mut self, tag: Tag) -> TiffResult<Vec<u64>> { self.get_tag(tag)?.into_u64_vec() } /// Tries to retrieve a tag and convert it to the desired type. pub fn get_tag_f32_vec(&mut self, tag: Tag) -> TiffResult<Vec<f32>> { self.get_tag(tag)?.into_f32_vec() } /// Tries to retrieve a tag and convert it to the desired type. pub fn get_tag_f64_vec(&mut self, tag: Tag) -> TiffResult<Vec<f64>> { self.get_tag(tag)?.into_f64_vec() } /// Tries to retrieve a tag and convert it to a 8bit vector. pub fn get_tag_u8_vec(&mut self, tag: Tag) -> TiffResult<Vec<u8>> { self.get_tag(tag)?.into_u8_vec() } /// Tries to retrieve a tag and convert it to a ascii vector. pub fn get_tag_ascii_string(&mut self, tag: Tag) -> TiffResult<String> { self.get_tag(tag)?.into_string() } /// Decompresses the strip into the supplied buffer. /// Returns the number of bytes read. fn expand_strip<'a>( &mut self, mut buffer: DecodingBuffer<'a>, offset: u64, length: u64, ) -> TiffResult<()> { // Validate that the provided buffer is of the expected type. let color_type = self.colortype()?; match (color_type, &buffer) { (ColorType::RGB(n), _) | (ColorType::RGBA(n), _) | (ColorType::CMYK(n), _) | (ColorType::Gray(n), _) if usize::from(n) == buffer.byte_len() * 8 => {} (ColorType::Gray(n), DecodingBuffer::U8(_)) if n <= 8 => {} (type_, _) => { return Err(TiffError::UnsupportedError( TiffUnsupportedError::UnsupportedColorType(type_), )) } } // Construct necessary reader to perform decompression. self.goto_offset_u64(offset)?; let byte_order = self.reader.byte_order; let mut reader: Box<dyn Read> = match self.compression_method { CompressionMethod::None => Box::new(&mut self.reader), CompressionMethod::LZW => Box::new( LZWReader::new( &mut self.reader, usize::try_from(length)?, buffer.len() * buffer.byte_len(), )? .1, ), CompressionMethod::PackBits => { Box::new(PackBitsReader::new(&mut self.reader, usize::try_from(length)?)?.1) } CompressionMethod::OldDeflate => { Box::new(DeflateReader::new(&mut self.reader, buffer.len() * buffer.byte_len())?.1) } method => { return Err(TiffError::UnsupportedError( TiffUnsupportedError::UnsupportedCompressionMethod(method), )) } }; // Read into output buffer. { let buffer = match &mut buffer { DecodingBuffer::U8(buf) => &mut *buf, DecodingBuffer::I8(buf) => bytecast::i8_as_ne_mut_bytes(buf), DecodingBuffer::U16(buf) => bytecast::u16_as_ne_mut_bytes(buf), DecodingBuffer::I16(buf) => bytecast::i16_as_ne_mut_bytes(buf), DecodingBuffer::U32(buf) => bytecast::u32_as_ne_mut_bytes(buf), DecodingBuffer::I32(buf) => bytecast::i32_as_ne_mut_bytes(buf), DecodingBuffer::U64(buf) => bytecast::u64_as_ne_mut_bytes(buf), DecodingBuffer::I64(buf) => bytecast::i64_as_ne_mut_bytes(buf), DecodingBuffer::F32(buf) => bytecast::f32_as_ne_mut_bytes(buf), DecodingBuffer::F64(buf) => bytecast::f64_as_ne_mut_bytes(buf), }; let mut bytes_written = 0; while bytes_written < buffer.len() { match reader.read(&mut buffer[bytes_written..]) { Ok(0) => break, Ok(n) => bytes_written += n, Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {} Err(e) => Err(e)?, } } for b in &mut buffer[bytes_written..] { *b = 0; } } // Fix endianness. If `byte_order` matches the host, then conversion is a no-op. 
match byte_order { ByteOrder::LittleEndian => match &mut buffer { DecodingBuffer::U8(_) | DecodingBuffer::I8(_) => {} DecodingBuffer::U16(buf) => buf.iter_mut().for_each(|v| *v = u16::from_le(*v)), DecodingBuffer::I16(buf) => buf.iter_mut().for_each(|v| *v = i16::from_le(*v)), DecodingBuffer::U32(buf) => buf.iter_mut().for_each(|v| *v = u32::from_le(*v)), DecodingBuffer::I32(buf) => buf.iter_mut().for_each(|v| *v = i32::from_le(*v)), DecodingBuffer::U64(buf) => buf.iter_mut().for_each(|v| *v = u64::from_le(*v)), DecodingBuffer::I64(buf) => buf.iter_mut().for_each(|v| *v = i64::from_le(*v)), DecodingBuffer::F32(buf) => buf .iter_mut() .for_each(|v| *v = f32::from_bits(u32::from_le(v.to_bits()))), DecodingBuffer::F64(buf) => buf .iter_mut() .for_each(|v| *v = f64::from_bits(u64::from_le(v.to_bits()))), }, ByteOrder::BigEndian => match &mut buffer { DecodingBuffer::U8(_) | DecodingBuffer::I8(_) => {} DecodingBuffer::U16(buf) => buf.iter_mut().for_each(|v| *v = u16::from_be(*v)), DecodingBuffer::I16(buf) => buf.iter_mut().for_each(|v| *v = i16::from_be(*v)), DecodingBuffer::U32(buf) => buf.iter_mut().for_each(|v| *v = u32::from_be(*v)), DecodingBuffer::I32(buf) => buf.iter_mut().for_each(|v| *v = i32::from_be(*v)), DecodingBuffer::U64(buf) => buf.iter_mut().for_each(|v| *v = u64::from_be(*v)), DecodingBuffer::I64(buf) => buf.iter_mut().for_each(|v| *v = i64::from_be(*v)), DecodingBuffer::F32(buf) => buf .iter_mut() .for_each(|v| *v = f32::from_bits(u32::from_be(v.to_bits()))), DecodingBuffer::F64(buf) => buf .iter_mut() .for_each(|v| *v = f64::from_bits(u64::from_be(v.to_bits()))), }, } // Invert colors if necessary. if self.photometric_interpretation == PhotometricInterpretation::WhiteIsZero { match (color_type, &mut buffer) { (ColorType::Gray(64), DecodingBuffer::U64(ref mut buffer)) => { for datum in buffer.iter_mut() { *datum = 0xffff_ffff_ffff_ffff - *datum } } (ColorType::Gray(32), DecodingBuffer::U32(ref mut buffer)) => { for datum in buffer.iter_mut() { *datum = 0xffff_ffff - *datum } } (ColorType::Gray(16), DecodingBuffer::U16(ref mut buffer)) => { for datum in buffer.iter_mut() { *datum = 0xffff - *datum } } (ColorType::Gray(n), DecodingBuffer::U8(ref mut buffer)) if n <= 8 => { for byte in buffer.iter_mut() { *byte = 0xff - *byte } } (ColorType::Gray(32), DecodingBuffer::F32(ref mut buffer)) => { for datum in buffer.iter_mut() { // FIXME: assumes [0, 1) range for floats *datum = 1.0 - *datum } } (ColorType::Gray(64), DecodingBuffer::F64(ref mut buffer)) => { for datum in buffer.iter_mut() { // FIXME: assumes [0, 1) range for floats *datum = 1.0 - *datum } } _ => {} } } Ok(()) } /// Decompresses the tile into the supplied buffer. /// Returns the number of bytes read. 
fn expand_tile<'a>( &mut self, mut buffer: DecodingBuffer<'a>, offset: u64, compressed_length: u64, tile: usize, ) -> TiffResult<()> { let color_type = self.colortype()?; let byte_len = buffer.byte_len(); let tile_attrs = self.tile_attributes.as_mut().unwrap(); let (padding_right, padding_down) = tile_attrs.get_padding(tile); let tile_samples = tile_attrs.tile_samples; let tile_length = tile_attrs.tile_length; let row_samples = tile_attrs.row_samples; let padding_right_samples = padding_right * self.bits_per_sample.len(); self.goto_offset_u64(offset)?; let tile_decoder = self.tile_decoder.as_mut().unwrap(); let line_samples = tile_decoder.result_width * self.bits_per_sample.len(); let padding_buffer = &mut tile_decoder.padding_buffer; let mut reader = Self::create_reader( &mut self.reader, self.compression_method, compressed_length, tile_samples, byte_len, )?; for row in 0..(tile_length - padding_down) { let buf = match &mut buffer { DecodingBuffer::U8(buf) => &mut *buf, DecodingBuffer::I8(buf) => bytecast::i8_as_ne_mut_bytes(buf), DecodingBuffer::U16(buf) => bytecast::u16_as_ne_mut_bytes(buf), DecodingBuffer::I16(buf) => bytecast::i16_as_ne_mut_bytes(buf), DecodingBuffer::U32(buf) => bytecast::u32_as_ne_mut_bytes(buf), DecodingBuffer::I32(buf) => bytecast::i32_as_ne_mut_bytes(buf), DecodingBuffer::U64(buf) => bytecast::u64_as_ne_mut_bytes(buf), DecodingBuffer::I64(buf) => bytecast::i64_as_ne_mut_bytes(buf), DecodingBuffer::F32(buf) => bytecast::f32_as_ne_mut_bytes(buf), DecodingBuffer::F64(buf) => bytecast::f64_as_ne_mut_bytes(buf), }; let row_start = row * line_samples; let row_end = row_start + row_samples - padding_right_samples; let row = &mut buf[(row_start * byte_len)..(row_end * byte_len)]; reader.read_exact(row)?; // Skip horizontal padding // TODO: find a better way of skipping the padding if padding_right > 0 { reader.read_exact(padding_buffer)? 
} // Fix byte order if necessary match self.byte_order { ByteOrder::LittleEndian => match &mut buffer { DecodingBuffer::U8(_) | DecodingBuffer::I8(_) => {} DecodingBuffer::U16(b) => b[row_start..row_end] .iter_mut() .for_each(|v| *v = u16::from_le(*v)), DecodingBuffer::I16(b) => b[row_start..row_end] .iter_mut() .for_each(|v| *v = i16::from_le(*v)), DecodingBuffer::U32(b) => b[row_start..row_end] .iter_mut() .for_each(|v| *v = u32::from_le(*v)), DecodingBuffer::I32(b) => b[row_start..row_end] .iter_mut() .for_each(|v| *v = i32::from_le(*v)), DecodingBuffer::U64(b) => b[row_start..row_end] .iter_mut() .for_each(|v| *v = u64::from_le(*v)), DecodingBuffer::I64(b) => b[row_start..row_end] .iter_mut() .for_each(|v| *v = i64::from_le(*v)), DecodingBuffer::F32(b) => b[row_start..row_end] .iter_mut() .for_each(|v| *v = f32::from_bits(u32::from_le(v.to_bits()))), DecodingBuffer::F64(b) => b[row_start..row_end] .iter_mut() .for_each(|v| *v = f64::from_bits(u64::from_le(v.to_bits()))), }, ByteOrder::BigEndian => match &mut buffer { DecodingBuffer::U8(_) | DecodingBuffer::I8(_) => {} DecodingBuffer::U16(b) => b[row_start..row_end] .iter_mut() .for_each(|v| *v = u16::from_be(*v)), DecodingBuffer::I16(b) => b[row_start..row_end] .iter_mut() .for_each(|v| *v = i16::from_be(*v)), DecodingBuffer::U32(b) => b[row_start..row_end] .iter_mut() .for_each(|v| *v = u32::from_be(*v)), DecodingBuffer::I32(b) => b[row_start..row_end] .iter_mut() .for_each(|v| *v = i32::from_be(*v)), DecodingBuffer::U64(b) => b[row_start..row_end] .iter_mut() .for_each(|v| *v = u64::from_be(*v)), DecodingBuffer::I64(b) => b[row_start..row_end] .iter_mut() .for_each(|v| *v = i64::from_be(*v)), DecodingBuffer::F32(b) => b[row_start..row_end] .iter_mut() .for_each(|v| *v = f32::from_bits(u32::from_be(v.to_bits()))), DecodingBuffer::F64(b) => b[row_start..row_end] .iter_mut() .for_each(|v| *v = f64::from_bits(u64::from_be(v.to_bits()))), }, }; // Invert colors if necessary. if self.photometric_interpretation == PhotometricInterpretation::WhiteIsZero { match (color_type, &mut buffer) { (ColorType::Gray(64), DecodingBuffer::U64(ref mut buffer)) => { process_photometry_u64(&mut buffer[row_start..row_end]); } (ColorType::Gray(32), DecodingBuffer::U32(ref mut buffer)) => { process_photometry_u32(&mut buffer[row_start..row_end]); } (ColorType::Gray(16), DecodingBuffer::U16(ref mut buffer)) => { process_photometry_u16(&mut buffer[row_start..row_end]); } (ColorType::Gray(n), DecodingBuffer::U8(ref mut buffer)) if n <= 8 => { process_photometry_u8(&mut buffer[row_start..row_end]); } (ColorType::Gray(32), DecodingBuffer::F32(ref mut buffer)) => { process_photometry_f32(&mut buffer[row_start..row_end]); } (ColorType::Gray(64), DecodingBuffer::F64(ref mut buffer)) => { process_photometry_f64(&mut buffer[row_start..row_end]); } (ColorType::Gray(n), DecodingBuffer::I8(ref mut buffer)) if n <= 8 => { process_photometry_i8(&mut buffer[row_start..row_end]); } (ColorType::Gray(16), DecodingBuffer::I16(ref mut buffer)) => { process_photometry_i16(&mut buffer[row_start..row_end]); } _ => {} } } } Ok(()) } fn create_reader<'r>( reader: &'r mut SmartReader<R>, compression_method: CompressionMethod, compressed_length: u64, samples: usize, // Expected chunk length in samples byte_len: usize, // Byte length of the result buffer ) -> TiffResult<Box<dyn Read + 'r>> { Ok(match compression_method { CompressionMethod::None => Box::new(reader), CompressionMethod::LZW => Box::new( LZWReader::new( reader, usize::try_from(compressed_length)?, samples * byte_len, )? 
.1, ), CompressionMethod::PackBits => { Box::new(PackBitsReader::new(reader, usize::try_from(compressed_length)?)?.1) } CompressionMethod::OldDeflate => { Box::new(DeflateReader::new(reader, samples * byte_len)?.1) } method => { return Err(TiffError::UnsupportedError( TiffUnsupportedError::UnsupportedCompressionMethod(method), )) } }) } fn check_chunk_type(&self, expected: ChunkType) -> TiffResult<()> { if expected != self.chunk_type { return Err(TiffError::UsageError(UsageError::InvalidChunkType( expected, self.chunk_type, ))); } Ok(()) } /// The chunk type (Strips / Tiles) of the image pub fn get_chunk_type(&self) -> ChunkType { self.chunk_type } /// Number of strips in image pub fn strip_count(&mut self) -> TiffResult<u32> { self.check_chunk_type(ChunkType::Strip)?; let rows_per_strip = self.get_tag_u32(Tag::RowsPerStrip).unwrap_or(self.height); if rows_per_strip == 0 { return Ok(0); } Ok((self.height + rows_per_strip - 1) / rows_per_strip) } /// Number of tiles in image pub fn tile_count(&mut self) -> TiffResult<u32> { self.check_chunk_type(ChunkType::Tile)?; self.init_tile_attributes()?; let tile_attrs = self.tile_attributes.as_ref().unwrap(); Ok(u32::try_from( tile_attrs.tiles_across * tile_attrs.tiles_down, )?) } fn initialize_strip_decoder(&mut self) -> TiffResult<()> { if self.strip_decoder.is_none() { let strip_offsets = self.get_tag_u64_vec(Tag::StripOffsets)?; let strip_bytes = self.get_tag_u64_vec(Tag::StripByteCounts)?; self.strip_decoder = Some(StripDecodeState { strip_index: 0, strip_offsets, strip_bytes, }); } Ok(()) } fn init_tile_attributes(&mut self) -> TiffResult<()> { if self.tile_attributes.is_none() { let tile_width = usize::try_from(self.get_tag_u32(Tag::TileWidth)?)?; let tile_length = usize::try_from(self.get_tag_u32(Tag::TileLength)?)?; let tiles_across = (usize::try_from(self.width)? + tile_width - 1) / tile_width; let tiles_down = (usize::try_from(self.height)? 
+ tile_length - 1) / tile_length; let samples_per_pixel = self.bits_per_sample.len(); let tile_samples = tile_length * tile_width * samples_per_pixel; let padding_right = (tiles_across * tile_width) - usize::try_from(self.width)?; let tile_strip_samples = (tile_samples * tiles_across) - (padding_right * tile_length * samples_per_pixel); self.tile_attributes = Some(TileAttributes { tile_width, tile_length, tiles_across, tiles_down, tile_samples, padding_right, padding_down: (tiles_down * tile_length) - usize::try_from(self.height)?, row_samples: (tile_width * samples_per_pixel), tile_strip_samples, }); } Ok(()) } fn update_tile_decoder( &mut self, result_width: usize, buffer_byte_len: usize, ) -> TiffResult<()> { let samples_per_pixel = self.bits_per_sample.len(); if self.tile_decoder.is_none() { let tile_attrs = self.tile_attributes.as_ref().unwrap(); let padding_buffer_size = tile_attrs.padding_right * samples_per_pixel * buffer_byte_len; if padding_buffer_size > self.limits.intermediate_buffer_size { return Err(TiffError::LimitsExceeded); } self.tile_decoder = Some(TileDecodeState { current_tile: 0, tile_offsets: self.get_tag_u64_vec(Tag::TileOffsets)?, tile_bytes: self.get_tag_u64_vec(Tag::TileByteCounts)?, padding_buffer: vec![0; padding_buffer_size], result_width: 0, // needs to be updated for differently padded tiles, see below }) } self.tile_decoder.as_mut().unwrap().result_width = result_width; Ok(()) } pub fn read_jpeg(&mut self) -> TiffResult<DecodingResult> { let offsets = self.get_tag_u32_vec(Tag::StripOffsets)?; let bytes = self.get_tag_u32_vec(Tag::StripByteCounts)?; let jpeg_tables: Option<Vec<u8>> = match self.find_tag(Tag::JPEGTables) { Ok(None) => None, Ok(_) => Some(self.get_tag_u8_vec(Tag::JPEGTables)?), Err(e) => return Err(e), }; if offsets.is_empty() { return Err(TiffError::FormatError(TiffFormatError::RequiredTagEmpty( Tag::StripOffsets, ))); } if offsets.len() != bytes.len() { return Err(TiffError::FormatError( TiffFormatError::InconsistentSizesEncountered, )); } let mut res_img = Vec::with_capacity(offsets[0] as usize); for (idx, offset) in offsets.iter().enumerate() { self.goto_offset(*offset)?; let jpeg_reader = JpegReader::new(&mut self.reader, bytes[idx], &jpeg_tables)?; let mut decoder = jpeg::Decoder::new(jpeg_reader); match decoder.decode() { Ok(mut val) => res_img.append(&mut val), Err(e) => { return match e { jpeg::Error::Io(io_err) => Err(TiffError::IoError(io_err)), jpeg::Error::Format(fmt_err) => { Err(TiffError::FormatError(TiffFormatError::Format(fmt_err))) } jpeg::Error::Unsupported(_) => Err(TiffError::UnsupportedError( TiffUnsupportedError::UnknownInterpretation, )), jpeg::Error::Internal(_) => Err(TiffError::UnsupportedError( TiffUnsupportedError::UnknownInterpretation, )), } } } } Ok(DecodingResult::U8(res_img)) } pub fn read_strip_to_buffer(&mut self, mut buffer: DecodingBuffer) -> TiffResult<()> { self.initialize_strip_decoder()?; let index = self.strip_decoder.as_ref().unwrap().strip_index; let offset = *self .strip_decoder .as_ref() .unwrap() .strip_offsets .get(index) .ok_or(TiffError::FormatError( TiffFormatError::InconsistentSizesEncountered, ))?; let byte_count = *self .strip_decoder .as_ref() .unwrap() .strip_bytes .get(index) .ok_or(TiffError::FormatError( TiffFormatError::InconsistentSizesEncountered, ))?; let tag_rows = self.get_tag_u32(Tag::RowsPerStrip).unwrap_or(self.height); let rows_per_strip = usize::try_from(tag_rows)?; let sized_width = usize::try_from(self.width)?; let sized_height = usize::try_from(self.height)?; // Ignore
potential vertical padding on the bottommost strip let strip_height = cmp::min(rows_per_strip, sized_height - index * rows_per_strip); let buffer_size = sized_width * strip_height * self.bits_per_sample.len(); if buffer.len() < buffer_size { return Err(TiffError::FormatError( TiffFormatError::InconsistentSizesEncountered, )); } self.expand_strip(buffer.prefix(buffer_size), offset, byte_count)?; self.strip_decoder.as_mut().unwrap().strip_index += 1; if u32::try_from(index)? == self.strip_count()? { self.strip_decoder = None; } if let Ok(predictor) = self.get_tag_unsigned(Tag::Predictor) { match Predictor::from_u16(predictor) { Some(Predictor::None) => (), Some(Predictor::Horizontal) => { rev_hpredict( buffer.copy(), (self.width, u32::try_from(strip_height)?), usize::try_from(self.width)?, self.colortype()?, )?; } None => { return Err(TiffError::FormatError(TiffFormatError::UnknownPredictor( predictor, ))) } Some(Predictor::__NonExhaustive) => unreachable!(), } } Ok(()) } fn read_tile_to_buffer(&mut self, result: &mut DecodingBuffer, tile: usize) -> TiffResult<()> { let file_offset = *self .tile_decoder .as_ref() .unwrap() .tile_offsets .get(tile) .ok_or(TiffError::FormatError( TiffFormatError::InconsistentSizesEncountered, ))?; let compressed_bytes = *self .tile_decoder .as_ref() .unwrap() .tile_bytes .get(tile) .ok_or(TiffError::FormatError( TiffFormatError::InconsistentSizesEncountered, ))?; let tile_attrs = self.tile_attributes.as_ref().unwrap(); let tile_width = tile_attrs.tile_width; let tile_length = tile_attrs.tile_length; let (padding_right, padding_down) = tile_attrs.get_padding(tile); self.expand_tile(result.copy(), file_offset, compressed_bytes, tile)?; if let Ok(predictor) = self.get_tag_unsigned(Tag::Predictor) { match Predictor::from_u16(predictor) { Some(Predictor::None) => (), Some(Predictor::Horizontal) => { rev_hpredict( result.copy(), ( u32::try_from(tile_width - padding_right)?, u32::try_from(tile_length - padding_down)?, ), self.tile_decoder.as_ref().unwrap().result_width, self.colortype()?, )?; } None => { return Err(TiffError::FormatError(TiffFormatError::UnknownPredictor( predictor, ))) } Some(Predictor::__NonExhaustive) => unreachable!(), } } Ok(()) } fn result_buffer(&self, width: usize, height: usize) -> TiffResult<DecodingResult> { let buffer_size = width * height * self.bits_per_sample.len(); let max_sample_bits = self.bits_per_sample.iter().cloned().max().unwrap_or(8); match self.sample_format.first().unwrap_or(&SampleFormat::Uint) { SampleFormat::Uint => match max_sample_bits { n if n <= 8 => DecodingResult::new_u8(buffer_size, &self.limits), n if n <= 16 => DecodingResult::new_u16(buffer_size, &self.limits), n if n <= 32 => DecodingResult::new_u32(buffer_size, &self.limits), n if n <= 64 => DecodingResult::new_u64(buffer_size, &self.limits), n => Err(TiffError::UnsupportedError( TiffUnsupportedError::UnsupportedBitsPerChannel(n), )), }, SampleFormat::IEEEFP => match max_sample_bits { 32 => DecodingResult::new_f32(buffer_size, &self.limits), 64 => DecodingResult::new_f64(buffer_size, &self.limits), n => Err(TiffError::UnsupportedError( TiffUnsupportedError::UnsupportedBitsPerChannel(n), )), }, SampleFormat::Int => match max_sample_bits { n if n <= 8 => DecodingResult::new_i8(buffer_size, &self.limits), n if n <= 16 => DecodingResult::new_i16(buffer_size, &self.limits), n if n <= 32 => DecodingResult::new_i32(buffer_size, &self.limits), n if n <= 64 => DecodingResult::new_i64(buffer_size, &self.limits), n => Err(TiffError::UnsupportedError( 
TiffUnsupportedError::UnsupportedBitsPerChannel(n), )), }, format => { Err(TiffUnsupportedError::UnsupportedSampleFormat(vec![format.clone()]).into()) } } } /// Read a single strip from the image and return it as a Vector pub fn read_strip(&mut self) -> TiffResult<DecodingResult> { self.check_chunk_type(ChunkType::Strip)?; self.initialize_strip_decoder()?; let index = self.strip_decoder.as_ref().unwrap().strip_index; let rows_per_strip = usize::try_from(self.get_tag_u32(Tag::RowsPerStrip).unwrap_or(self.height))?; let strip_height = cmp::min( rows_per_strip, usize::try_from(self.height)? - index * rows_per_strip, ); let mut result = self.result_buffer(usize::try_from(self.width)?, strip_height)?; self.read_strip_to_buffer(result.as_buffer(0))?; Ok(result) } /// Read a single tile from the image and return it as a Vector pub fn read_tile(&mut self) -> TiffResult<DecodingResult> { self.check_chunk_type(ChunkType::Tile)?; self.init_tile_attributes()?; let tile = self.tile_decoder.as_ref().map_or(0, |d| d.current_tile); let tile_attrs = self.tile_attributes.as_ref().unwrap(); let (padding_right, padding_down) = tile_attrs.get_padding(tile); let tile_width = tile_attrs.tile_width - padding_right; let tile_length = tile_attrs.tile_length - padding_down; let mut result = self.result_buffer(tile_width, tile_length)?; self.update_tile_decoder(tile_width, result.as_buffer(0).byte_len())?; self.read_tile_to_buffer(&mut result.as_buffer(0), tile)?; self.tile_decoder.as_mut().unwrap().current_tile += 1; Ok(result) } fn read_tiled_image(&mut self) -> TiffResult<DecodingResult> { let width = usize::try_from(self.width)?; let mut result = self.result_buffer(width, usize::try_from(self.height)?)?; self.init_tile_attributes()?; self.update_tile_decoder(width, result.as_buffer(0).byte_len())?; let tile_attrs = self.tile_attributes.as_ref().unwrap(); let tiles_across = tile_attrs.tiles_across; let tiles_down = tile_attrs.tiles_down; for tile in 0..(tiles_across * tiles_down) { let buffer_offset = self.tile_attributes.as_ref().unwrap().get_offset(tile); self.read_tile_to_buffer(&mut result.as_buffer(buffer_offset), tile)?; } Ok(result) } fn read_stripped_image(&mut self) -> TiffResult<DecodingResult> { self.initialize_strip_decoder()?; let rows_per_strip = usize::try_from(self.get_tag_u32(Tag::RowsPerStrip).unwrap_or(self.height))?; let samples_per_strip = usize::try_from(self.width)? * rows_per_strip * self.bits_per_sample.len(); let mut result = self.result_buffer(usize::try_from(self.width)?, usize::try_from(self.height)?)?; for i in 0..usize::try_from(self.strip_count()?)? { let r = result.as_buffer(samples_per_strip * i); self.read_strip_to_buffer(r)?; } Ok(result) } /// Decodes the entire image and returns it as a Vector pub fn read_image(&mut self) -> TiffResult<DecodingResult> { let result = match (self.chunk_type, self.compression_method) { (_, CompressionMethod::ModernJPEG) => self.read_jpeg()?, (ChunkType::Strip, _) => self.read_stripped_image()?, (ChunkType::Tile, _) => self.read_tiled_image()?, }; Ok(result) } }
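// A minimal usage sketch for the chunked API above (illustrative, not part of
// this file): `read_image` dispatches to the strip or tile path based on the
// detected `ChunkType`, so most callers only need the top-level entry point.
// It assumes the crate's public `Decoder::new` constructor; the file path is
// hypothetical.
//
//     let mut decoder = Decoder::new(std::fs::File::open("image.tif")?)?;
//     match decoder.read_image()? {
//         DecodingResult::U8(buf) => { /* 8-bit samples */ }
//         DecodingResult::U16(buf) => { /* 16-bit samples */ }
//         _ => { /* wider integer or float sample formats */ }
//     }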
36.961262
110
0.54521
e91daf72d90b4f763230afd411326b9fac682b04
1,946
use super::InternalEvent; use metrics::counter; #[derive(Debug)] pub struct UnixSocketConnectionEstablished<'a> { pub path: &'a std::path::Path, } impl InternalEvent for UnixSocketConnectionEstablished<'_> { fn emit_logs(&self) { debug!(message = "Connected.", path = ?self.path); } fn emit_metrics(&self) { counter!("unix_socket_connections_established", 1, "component_kind" => "sink", ); } } #[derive(Debug)] pub struct UnixSocketConnectionFailure<'a> { pub error: tokio::io::Error, pub path: &'a std::path::Path, } impl InternalEvent for UnixSocketConnectionFailure<'_> { fn emit_logs(&self) { error!( message = "Unix socket connection failure.", error = %self.error, path = ?self.path, ); } fn emit_metrics(&self) { counter!("unix_socket_connection_failures", 1, "component_kind" => "sink", ); } } #[derive(Debug)] pub struct UnixSocketError<'a, E> { pub error: E, pub path: &'a std::path::Path, } impl<E: From<std::io::Error> + std::fmt::Debug + std::fmt::Display> InternalEvent for UnixSocketError<'_, E> { fn emit_logs(&self) { debug!( message = "Unix socket error.", error = %self.error, path = ?self.path, ); } fn emit_metrics(&self) { counter!("unix_socket_errors", 1); } } #[derive(Debug)] pub struct UnixSocketEventSent { pub byte_size: usize, } impl InternalEvent for UnixSocketEventSent { fn emit_metrics(&self) { counter!("events_processed", 1, "component_kind" => "sink", "component_type" => "socket", "mode" => "unix", ); counter!("bytes_processed", self.byte_size as u64, "component_kind" => "sink", "component_type" => "socket", "mode" => "unix", ); } }
23.166667
81
0.565262
bf06a6742e3bf6b6e4c6e17431ea98267f35a304
1,412
use tracing::{info, info_span, trace, Dispatch}; use tracing_limit::RateLimitedLayer; use tracing_subscriber::layer::SubscriberExt; fn main() { let subscriber = tracing_subscriber::registry::Registry::default() .with(RateLimitedLayer::new( tracing_subscriber::fmt::Layer::default().without_time(), )) .with(tracing_subscriber::filter::EnvFilter::from("trace")); let dispatch = Dispatch::new(subscriber); tracing::dispatcher::with_default(&dispatch, || { for i in 0..40usize { trace!("This field is not rate limited!"); for key in &["foo", "bar"] { for line_number in &[1, 2] { let span = info_span!( "sink", component_kind = "sink", component_id = &key, component_type = "fake", vrl_line_number = &line_number, ); let _enter = span.enter(); info!( message = "This message is rate limited by its component and vrl_line_number", count = &i, internal_log_rate_secs = 5, ); } } std::thread::sleep(std::time::Duration::from_millis(1000)); } }) }
36.205128
96
0.482295
ff232b0c8797a81373f08231724e0c3471938022
2,963
pub mod channel; pub mod message; pub enum Color { Red, #[allow(dead_code)] Green, #[allow(dead_code)] Yellow, #[allow(dead_code)] Blue, #[allow(dead_code)] Magenta, #[allow(dead_code)] Cyan, #[allow(dead_code)] White, } pub trait Wrap { fn wrap(&self, line_length: usize) -> String; } impl<T> Wrap for T where T: AsRef<str>, { fn wrap(&self, line_length: usize) -> String { use std::cmp::min; let s = self.as_ref(); let mut x = 0_usize; let mut y = min(s.len(), line_length); let mut ret = String::with_capacity(s.len() + s.len() / line_length); while x <= y && y < s.len() { let mut range_end = (&s[x..y]).rfind(' ').unwrap_or_else(|| s[x..y].len()); let newline = (&s[x..y]).rfind('\n'); if let Some(n) = newline { range_end = n; } y = x + range_end; ret.push_str(&s[x..y]); ret.push('\n'); x = y + 1; y = min(s.len(), x + line_length); } ret.push_str(&s[x..s.len()]); ret } } impl Color { fn to_code(&self) -> &str { match self { Color::Red => RED, Color::Green => GREEN, Color::Yellow => YELLOW, Color::Blue => BLUE, Color::Magenta => MAGENTA, Color::Cyan => CYAN, Color::White => WHITE, } } } const RED: &str = "[31m"; const GREEN: &str = "[32m"; const YELLOW: &str = "[33m"; const BLUE: &str = "[34m"; const MAGENTA: &str = "[35m"; const CYAN: &str = "[36m"; const WHITE: &str = "[37m"; #[cfg(test)] mod text_test { use super::Wrap; const DESC: &'static str = "You are at the Temple Yard of Dragonia. Beautiful marble stairs lead \ up to the Temple of Dragonia. You feel small as you stare up the huge pillars making the entrance \ to the temple. This place serves as a sanctuary where the people of the city can come and seek \ refuge, and rest their tired bones. Just north of here is the common square, and the temple opens \ to the south."; #[test] fn test_line_wrap() { assert_eq!("abcd".wrap(7), "abcd"); assert_eq!( "1 3 5 7 9 1 39234 290 290 5 7 9".wrap(10), "1 3 5 7 9\n1 39234\n290 290 5\n7 9" ); } #[test] fn test_line_wrap_idempotency() { let desc = DESC.wrap(80); let second = desc.wrap(80); assert_eq!(desc, second.wrap(80)); assert_ne!(desc, second.wrap(75)); } } pub fn article(noun: &str) -> String { let suffix = match noun.to_lowercase().chars().next().unwrap_or('\0') { 'a' | 'e' | 'i' | 'o' | 'u' => "n", _ => "", }; format!("a{} {}", suffix, noun) }
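// An illustrative test for `article` (added as a sketch; it mirrors the
// existing `text_test` module and only exercises the vowel rule above).
#[cfg(test)]
mod article_test {
    use super::article;

    #[test]
    fn test_article() {
        // "an" before a word whose lowercased first letter is a vowel.
        assert_eq!(article("apple"), "an apple");
        assert_eq!(article("Orb"), "an Orb");
        // "a" otherwise.
        assert_eq!(article("sword"), "a sword");
    }
}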
24.487603
103
0.511306
de4cd6d66b70533166e79ea956b2bb0697b3b6ba
5,466
use fmterr::fmt_err; use perseus::{ errors::err_to_status_code, internal::{ get_path_prefix_server, i18n::{TranslationsManager, Translator}, router::{match_route_atomic, RouteInfoAtomic, RouteVerdictAtomic}, serve::{ build_error_page, get_page_for_template, get_path_slice, GetPageProps, HtmlShell, ServerOptions, }, }, stores::{ImmutableStore, MutableStore}, ErrorPages, SsrNode, }; use std::{collections::HashMap, rc::Rc, sync::Arc}; use warp::{http::Response, path::FullPath}; /// Builds on the internal Perseus primitives to provide a utility function that returns a `Response` automatically. fn return_error_page( url: &str, status: u16, // This should already have been transformed into a string (with a source chain etc.) err: &str, translator: Option<Rc<Translator>>, error_pages: &ErrorPages<SsrNode>, html_shell: &HtmlShell, ) -> Response<String> { let html = build_error_page(url, status, err, translator, error_pages, html_shell); Response::builder().status(status).body(html).unwrap() } /// The handler for calls to any actual pages (first-time visits), which will render the appropriate HTML and then interpolate it into /// the app shell. #[allow(clippy::too_many_arguments)] // As for `page_data_handler`, we don't have a choice pub async fn initial_load_handler<M: MutableStore, T: TranslationsManager>( path: FullPath, req: perseus::http::Request<()>, opts: Arc<ServerOptions>, html_shell: Arc<HtmlShell>, render_cfg: Arc<HashMap<String, String>>, immutable_store: Arc<ImmutableStore>, mutable_store: Arc<M>, translations_manager: Arc<T>, global_state: Arc<Option<String>>, ) -> Response<String> { let path = path.as_str(); let templates = &opts.templates_map; let error_pages = &opts.error_pages; let path_slice = get_path_slice(path); // Create a closure to make returning error pages easier (most have the same data) let html_err = |status: u16, err: &str| { return return_error_page(path, status, err, None, error_pages, html_shell.as_ref()); }; // Run the routing algorithms on the path to figure out which template we need let verdict = match_route_atomic(&path_slice, render_cfg.as_ref(), templates, &opts.locales); match verdict { // If this is the outcome, we know that the locale is supported and the like // Given that all this is valid from the client, any errors are 500s RouteVerdictAtomic::Found(RouteInfoAtomic { path, // Used for asset fetching, this is what we'd get in `page_data` template, // The actual template to use locale, was_incremental_match, }) => { // Actually render the page as we would if this weren't an initial load let page_data = get_page_for_template( GetPageProps::<M, T> { raw_path: &path, locale: &locale, was_incremental_match, req, global_state: &global_state, immutable_store: &immutable_store, mutable_store: &mutable_store, translations_manager: &translations_manager, }, template, ) .await; let page_data = match page_data { Ok(page_data) => page_data, // We parse the error to return an appropriate status code Err(err) => { return html_err(err_to_status_code(&err), &fmt_err(&err)); } }; let final_html = html_shell .as_ref() .clone() .page_data(&page_data, &global_state) .to_string(); let mut http_res = Response::builder().status(200); // http_res.content_type("text/html"); // Generate and add HTTP headers for (key, val) in template.get_headers(page_data.state) { http_res = http_res.header(key.unwrap(), val); } http_res.body(final_html).unwrap() } // For locale detection, we don't know the user's locale, so there's not much we can do except send down the app shell, which will 
do the rest and fetch from `.perseus/page/...` RouteVerdictAtomic::LocaleDetection(path) => { // We respond with a `200 OK` carrying the app shell, which detects the locale and performs the redirection on the client side // An HTTP-level `302 Found` with a `Location` header would be the alternative, but the app shell handles the redirect itself, so no header is needed Response::builder() .status(200) .body( html_shell .as_ref() .clone() .locale_redirection_fallback( // We'll redirect the user to the default locale &format!( "{}/{}/{}", get_path_prefix_server(), opts.locales.default, path ), ) .to_string(), ) .unwrap() } RouteVerdictAtomic::NotFound => html_err(404, "page not found"), } }
41.725191
185
0.571716
9163d74b339f30f807ee02655489329dea43571b
10,484
mod convert; use crate::{CalculatedSize, Node, Style}; use bevy_app::EventReader; use bevy_ecs::{ entity::Entity, query::{Changed, FilterFetch, With, Without, WorldQuery}, system::{Query, Res, ResMut}, }; use bevy_log::warn; use bevy_math::Vec2; use bevy_transform::prelude::{Children, Parent, Transform}; use bevy_utils::HashMap; use bevy_window::{Window, WindowId, WindowScaleFactorChanged, Windows}; use std::fmt; use stretch::{number::Number, Stretch}; pub struct FlexSurface { entity_to_stretch: HashMap<Entity, stretch::node::Node>, window_nodes: HashMap<WindowId, stretch::node::Node>, stretch: Stretch, } // SAFE: as long as MeasureFunc is Send + Sync. https://github.com/vislyhq/stretch/issues/69 unsafe impl Send for FlexSurface {} unsafe impl Sync for FlexSurface {} fn _assert_send_sync_flex_surface_impl_safe() { fn _assert_send_sync<T: Send + Sync>() {} _assert_send_sync::<HashMap<Entity, stretch::node::Node>>(); _assert_send_sync::<HashMap<WindowId, stretch::node::Node>>(); // FIXME https://github.com/vislyhq/stretch/issues/69 // _assert_send_sync::<Stretch>(); } impl fmt::Debug for FlexSurface { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("FlexSurface") .field("entity_to_stretch", &self.entity_to_stretch) .field("window_nodes", &self.window_nodes) .finish() } } impl Default for FlexSurface { fn default() -> Self { Self { entity_to_stretch: Default::default(), window_nodes: Default::default(), stretch: Stretch::new(), } } } impl FlexSurface { pub fn upsert_node(&mut self, entity: Entity, style: &Style, scale_factor: f64) { let mut added = false; let stretch = &mut self.stretch; let stretch_style = convert::from_style(scale_factor, style); let stretch_node = self.entity_to_stretch.entry(entity).or_insert_with(|| { added = true; stretch.new_node(stretch_style, Vec::new()).unwrap() }); if !added { self.stretch .set_style(*stretch_node, stretch_style) .unwrap(); } } pub fn upsert_leaf( &mut self, entity: Entity, style: &Style, calculated_size: CalculatedSize, scale_factor: f64, ) { let stretch = &mut self.stretch; let stretch_style = convert::from_style(scale_factor, style); let measure = Box::new(move |constraints: stretch::geometry::Size<Number>| { let mut size = convert::from_f32_size(scale_factor, calculated_size.size); match (constraints.width, constraints.height) { (Number::Undefined, Number::Undefined) => {} (Number::Defined(width), Number::Undefined) => { size.height = width * size.height / size.width; size.width = width; } (Number::Undefined, Number::Defined(height)) => { size.width = height * size.width / size.height; size.height = height; } (Number::Defined(width), Number::Defined(height)) => { size.width = width; size.height = height; } } Ok(size) }); if let Some(stretch_node) = self.entity_to_stretch.get(&entity) { self.stretch .set_style(*stretch_node, stretch_style) .unwrap(); self.stretch .set_measure(*stretch_node, Some(measure)) .unwrap(); } else { let stretch_node = stretch.new_leaf(stretch_style, measure).unwrap(); self.entity_to_stretch.insert(entity, stretch_node); } } pub fn update_children(&mut self, entity: Entity, children: &Children) { let mut stretch_children = Vec::with_capacity(children.len()); for child in children.iter() { if let Some(stretch_node) = self.entity_to_stretch.get(child) { stretch_children.push(*stretch_node); } else { warn!( "Unstyled child in a UI entity hierarchy. You are using an entity \ without UI components as a child of an entity with UI components, results may be unexpected." 
); } } let stretch_node = self.entity_to_stretch.get(&entity).unwrap(); self.stretch .set_children(*stretch_node, stretch_children) .unwrap(); } pub fn update_window(&mut self, window: &Window) { let stretch = &mut self.stretch; let node = self.window_nodes.entry(window.id()).or_insert_with(|| { stretch .new_node(stretch::style::Style::default(), Vec::new()) .unwrap() }); stretch .set_style( *node, stretch::style::Style { size: stretch::geometry::Size { width: stretch::style::Dimension::Points(window.physical_width() as f32), height: stretch::style::Dimension::Points(window.physical_height() as f32), }, ..Default::default() }, ) .unwrap(); } pub fn set_window_children( &mut self, window_id: WindowId, children: impl Iterator<Item = Entity>, ) { let stretch_node = self.window_nodes.get(&window_id).unwrap(); let child_nodes = children .map(|e| *self.entity_to_stretch.get(&e).unwrap()) .collect::<Vec<stretch::node::Node>>(); self.stretch .set_children(*stretch_node, child_nodes) .unwrap(); } pub fn compute_window_layouts(&mut self) { for window_node in self.window_nodes.values() { self.stretch .compute_layout(*window_node, stretch::geometry::Size::undefined()) .unwrap(); } } pub fn get_layout(&self, entity: Entity) -> Result<&stretch::result::Layout, FlexError> { if let Some(stretch_node) = self.entity_to_stretch.get(&entity) { self.stretch .layout(*stretch_node) .map_err(FlexError::StretchError) } else { warn!( "Styled child in a non-UI entity hierarchy. You are using an entity \ with UI components as a child of an entity without UI components, results may be unexpected." ); Err(FlexError::InvalidHierarchy) } } } #[derive(Debug)] pub enum FlexError { InvalidHierarchy, StretchError(stretch::Error), } #[allow(clippy::too_many_arguments)] pub fn flex_node_system( windows: Res<Windows>, mut scale_factor_events: EventReader<WindowScaleFactorChanged>, mut flex_surface: ResMut<FlexSurface>, root_node_query: Query<Entity, (With<Node>, Without<Parent>)>, node_query: Query<(Entity, &Style, Option<&CalculatedSize>), (With<Node>, Changed<Style>)>, full_node_query: Query<(Entity, &Style, Option<&CalculatedSize>), With<Node>>, changed_size_query: Query< (Entity, &Style, &CalculatedSize), (With<Node>, Changed<CalculatedSize>), >, children_query: Query<(Entity, &Children), (With<Node>, Changed<Children>)>, mut node_transform_query: Query<(Entity, &mut Node, &mut Transform, Option<&Parent>)>, ) { // update window root nodes for window in windows.iter() { flex_surface.update_window(window); } // assume one window for time being... let logical_to_physical_factor = if let Some(primary_window) = windows.get_primary() { primary_window.scale_factor() } else { 1. 
}; if scale_factor_events.iter().next_back().is_some() { update_changed( &mut *flex_surface, logical_to_physical_factor, full_node_query, ); } else { update_changed(&mut *flex_surface, logical_to_physical_factor, node_query); } fn update_changed<F: WorldQuery>( flex_surface: &mut FlexSurface, scaling_factor: f64, query: Query<(Entity, &Style, Option<&CalculatedSize>), F>, ) where F::Fetch: FilterFetch, { // update changed nodes for (entity, style, calculated_size) in query.iter() { // TODO: remove node from old hierarchy if its root has changed if let Some(calculated_size) = calculated_size { flex_surface.upsert_leaf(entity, &style, *calculated_size, scaling_factor); } else { flex_surface.upsert_node(entity, &style, scaling_factor); } } } for (entity, style, calculated_size) in changed_size_query.iter() { flex_surface.upsert_leaf(entity, &style, *calculated_size, logical_to_physical_factor); } // TODO: handle removed nodes // update window children (for now assuming all Nodes live in the primary window) if let Some(primary_window) = windows.get_primary() { flex_surface.set_window_children(primary_window.id(), root_node_query.iter()); } // update children for (entity, children) in children_query.iter() { flex_surface.update_children(entity, &children); } // compute layouts flex_surface.compute_window_layouts(); let physical_to_logical_factor = 1. / logical_to_physical_factor; let to_logical = |v| (physical_to_logical_factor * v as f64) as f32; // PERF: try doing this incrementally for (entity, mut node, mut transform, parent) in node_transform_query.iter_mut() { let layout = flex_surface.get_layout(entity).unwrap(); node.size = Vec2::new( to_logical(layout.size.width), to_logical(layout.size.height), ); let position = &mut transform.translation; position.x = to_logical(layout.location.x + layout.size.width / 2.0); position.y = to_logical(layout.location.y + layout.size.height / 2.0); if let Some(parent) = parent { if let Ok(parent_layout) = flex_surface.get_layout(parent.0) { position.x -= to_logical(parent_layout.size.width / 2.0); position.y -= to_logical(parent_layout.size.height / 2.0); } } } }
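// A sketch of the constraint arithmetic used by the measure closure in
// `upsert_leaf` above (a hypothetical standalone helper, not part of this
// module): when exactly one axis is constrained, the other axis is scaled to
// preserve the measured content's aspect ratio.
//
//     fn constrain(size: (f32, f32), width: Option<f32>, height: Option<f32>) -> (f32, f32) {
//         match (width, height) {
//             (None, None) => size,
//             (Some(w), None) => (w, w * size.1 / size.0),
//             (None, Some(h)) => (h * size.0 / size.1, h),
//             (Some(w), Some(h)) => (w, h),
//         }
//     }
//
//     // e.g. a 100x50 content measured with width fixed at 200 yields (200.0, 100.0).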
35.659864
99
0.5971
9bf7db770a9dc65996622b0e0f0464bcf73de97a
20,181
#![doc = "generated by AutoRust 0.1.0"] #![allow(non_camel_case_types)] #![allow(unused_imports)] use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Operation { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub display: Option<operation::Display>, } pub mod operation { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Display { #[serde(default, skip_serializing_if = "Option::is_none")] pub provider: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub resource: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub operation: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub description: Option<String>, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OperationListResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<Operation>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Workspace { #[serde(flatten)] pub resource: Resource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<WorkspaceProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct WorkspaceProperties { #[serde(rename = "workspaceId", default, skip_serializing_if = "Option::is_none")] pub workspace_id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub description: Option<String>, #[serde(rename = "friendlyName", default, skip_serializing_if = "Option::is_none")] pub friendly_name: Option<String>, #[serde(rename = "creationTime", default, skip_serializing_if = "Option::is_none")] pub creation_time: Option<String>, #[serde(rename = "keyVault", default, skip_serializing_if = "Option::is_none")] pub key_vault: Option<String>, #[serde(rename = "applicationInsights", default, skip_serializing_if = "Option::is_none")] pub application_insights: Option<String>, #[serde(rename = "containerRegistry", default, skip_serializing_if = "Option::is_none")] pub container_registry: Option<String>, #[serde(rename = "storageAccount", default, skip_serializing_if = "Option::is_none")] pub storage_account: Option<String>, #[serde(rename = "discoveryUrl", default, skip_serializing_if = "Option::is_none")] pub discovery_url: Option<String>, #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")] pub provisioning_state: Option<workspace_properties::ProvisioningState>, } pub mod workspace_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ProvisioningState { Unknown, Updating, Creating, Deleting, Succeeded, Failed, Canceled, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct WorkspaceUpdateParameters { #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<WorkspacePropertiesUpdateParameters>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct WorkspacePropertiesUpdateParameters { #[serde(default, skip_serializing_if = "Option::is_none")] pub description: Option<String>, #[serde(rename = "friendlyName", default, skip_serializing_if = "Option::is_none")] pub friendly_name: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct UsageName { #[serde(default, 
skip_serializing_if = "Option::is_none")] pub value: Option<String>, #[serde(rename = "localizedValue", default, skip_serializing_if = "Option::is_none")] pub localized_value: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Usage { #[serde(default, skip_serializing_if = "Option::is_none")] pub unit: Option<usage::Unit>, #[serde(rename = "currentValue", default, skip_serializing_if = "Option::is_none")] pub current_value: Option<i64>, #[serde(default, skip_serializing_if = "Option::is_none")] pub limit: Option<i64>, #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<UsageName>, } pub mod usage { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Unit { Count, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ListUsagesResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<Usage>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct VirtualMachineSize { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub family: Option<String>, #[serde(rename = "vCPUs", default, skip_serializing_if = "Option::is_none")] pub v_cp_us: Option<i32>, #[serde(rename = "osVhdSizeMB", default, skip_serializing_if = "Option::is_none")] pub os_vhd_size_mb: Option<i32>, #[serde(rename = "maxResourceVolumeMB", default, skip_serializing_if = "Option::is_none")] pub max_resource_volume_mb: Option<i32>, #[serde(rename = "memoryGB", default, skip_serializing_if = "Option::is_none")] pub memory_gb: Option<f64>, #[serde(rename = "lowPriorityCapable", default, skip_serializing_if = "Option::is_none")] pub low_priority_capable: Option<bool>, #[serde(rename = "premiumIO", default, skip_serializing_if = "Option::is_none")] pub premium_io: Option<bool>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct VirtualMachineSizeListResult { #[serde(rename = "amlCompute", default, skip_serializing_if = "Vec::is_empty")] pub aml_compute: Vec<VirtualMachineSize>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct WorkspaceListResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<Workspace>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Identity { #[serde(rename = "principalId", default, skip_serializing_if = "Option::is_none")] pub principal_id: Option<String>, #[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")] pub tenant_id: Option<String>, #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")] pub type_: Option<identity::Type>, } pub mod identity { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Type { SystemAssigned, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Resource { #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub identity: Option<Identity>, #[serde(default, skip_serializing_if = "Option::is_none")] pub location: Option<String>, #[serde(rename = "type", default, 
skip_serializing_if = "Option::is_none")] pub type_: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ResourceId { pub id: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ListWorkspaceKeysResult { #[serde(rename = "userStorageKey", default, skip_serializing_if = "Option::is_none")] pub user_storage_key: Option<String>, #[serde(rename = "userStorageResourceId", default, skip_serializing_if = "Option::is_none")] pub user_storage_resource_id: Option<String>, #[serde(rename = "appInsightsInstrumentationKey", default, skip_serializing_if = "Option::is_none")] pub app_insights_instrumentation_key: Option<String>, #[serde(rename = "containerRegistryCredentials", default, skip_serializing_if = "Option::is_none")] pub container_registry_credentials: Option<RegistryListCredentialsResult>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RegistryListCredentialsResult { #[serde(default, skip_serializing_if = "Option::is_none")] pub location: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub username: Option<String>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub passwords: Vec<Password>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Password { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub value: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PaginatedComputeResourcesList { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<ComputeResource>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ComputeResource { #[serde(flatten)] pub resource: Resource, #[serde(flatten)] pub serde_json_value: serde_json::Value, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Compute { #[serde(rename = "computeType")] pub compute_type: ComputeType, #[serde(rename = "computeLocation", default, skip_serializing_if = "Option::is_none")] pub compute_location: Option<String>, #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")] pub provisioning_state: Option<compute::ProvisioningState>, #[serde(default, skip_serializing_if = "Option::is_none")] pub description: Option<String>, #[serde(rename = "createdOn", default, skip_serializing_if = "Option::is_none")] pub created_on: Option<String>, #[serde(rename = "modifiedOn", default, skip_serializing_if = "Option::is_none")] pub modified_on: Option<String>, #[serde(rename = "resourceId", default, skip_serializing_if = "Option::is_none")] pub resource_id: Option<String>, #[serde(rename = "provisioningErrors", default, skip_serializing_if = "Vec::is_empty")] pub provisioning_errors: Vec<MachineLearningServiceError>, #[serde(rename = "isAttachedCompute", default, skip_serializing_if = "Option::is_none")] pub is_attached_compute: Option<bool>, } pub mod compute { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ProvisioningState { Unknown, Updating, Creating, Deleting, Succeeded, Failed, Canceled, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Aks { #[serde(flatten)] pub compute: Compute, #[serde(flatten)] pub 
serde_json_value: serde_json::Value, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AmlCompute { #[serde(flatten)] pub compute: Compute, #[serde(flatten)] pub serde_json_value: serde_json::Value, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct VirtualMachine { #[serde(flatten)] pub compute: Compute, #[serde(flatten)] pub serde_json_value: serde_json::Value, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct HdInsight { #[serde(flatten)] pub compute: Compute, #[serde(flatten)] pub serde_json_value: serde_json::Value, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DataFactory { #[serde(flatten)] pub compute: Compute, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Databricks { #[serde(flatten)] pub compute: Compute, #[serde(flatten)] pub serde_json_value: serde_json::Value, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DataLakeAnalytics { #[serde(flatten)] pub compute: Compute, #[serde(flatten)] pub serde_json_value: serde_json::Value, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ServicePrincipalCredentials { #[serde(rename = "clientId")] pub client_id: String, #[serde(rename = "clientSecret")] pub client_secret: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SystemService { #[serde(rename = "systemServiceType", default, skip_serializing_if = "Option::is_none")] pub system_service_type: Option<String>, #[serde(rename = "publicIpAddress", default, skip_serializing_if = "Option::is_none")] pub public_ip_address: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub version: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SslConfiguration { #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option<ssl_configuration::Status>, #[serde(default, skip_serializing_if = "Option::is_none")] pub cert: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub key: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub cname: Option<String>, } pub mod ssl_configuration { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Status { Disabled, Enabled, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AksNetworkingConfiguration { #[serde(rename = "subnetId", default, skip_serializing_if = "Option::is_none")] pub subnet_id: Option<String>, #[serde(rename = "serviceCidr", default, skip_serializing_if = "Option::is_none")] pub service_cidr: Option<String>, #[serde(rename = "dnsServiceIP", default, skip_serializing_if = "Option::is_none")] pub dns_service_ip: Option<String>, #[serde(rename = "dockerBridgeCidr", default, skip_serializing_if = "Option::is_none")] pub docker_bridge_cidr: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct UserAccountCredentials { #[serde(rename = "adminUserName")] pub admin_user_name: String, #[serde(rename = "adminUserSshPublicKey", default, skip_serializing_if = "Option::is_none")] pub admin_user_ssh_public_key: Option<String>, #[serde(rename = "adminUserPassword", default, skip_serializing_if = "Option::is_none")] pub admin_user_password: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ScaleSettings { #[serde(rename = "maxNodeCount")] pub max_node_count: i64, #[serde(rename = "minNodeCount", default, skip_serializing_if = 
"Option::is_none")] pub min_node_count: Option<i64>, #[serde(rename = "nodeIdleTimeBeforeScaleDown", default, skip_serializing_if = "Option::is_none")] pub node_idle_time_before_scale_down: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct NodeStateCounts { #[serde(rename = "idleNodeCount", default, skip_serializing_if = "Option::is_none")] pub idle_node_count: Option<i32>, #[serde(rename = "runningNodeCount", default, skip_serializing_if = "Option::is_none")] pub running_node_count: Option<i32>, #[serde(rename = "preparingNodeCount", default, skip_serializing_if = "Option::is_none")] pub preparing_node_count: Option<i32>, #[serde(rename = "unusableNodeCount", default, skip_serializing_if = "Option::is_none")] pub unusable_node_count: Option<i32>, #[serde(rename = "leavingNodeCount", default, skip_serializing_if = "Option::is_none")] pub leaving_node_count: Option<i32>, #[serde(rename = "preemptedNodeCount", default, skip_serializing_if = "Option::is_none")] pub preempted_node_count: Option<i32>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ClusterUpdateProperties { #[serde(rename = "scaleSettings", default, skip_serializing_if = "Option::is_none")] pub scale_settings: Option<ScaleSettings>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ClusterUpdateParameters { #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<ClusterUpdateProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ComputeNodesInformation { #[serde(rename = "computeType")] pub compute_type: ComputeType, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AmlComputeNodesInformation { #[serde(flatten)] pub compute_nodes_information: ComputeNodesInformation, #[serde(flatten)] pub serde_json_value: serde_json::Value, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AmlComputeNodeInformation { #[serde(rename = "nodeId", default, skip_serializing_if = "Option::is_none")] pub node_id: Option<String>, #[serde(rename = "ipAddress", default, skip_serializing_if = "Option::is_none")] pub ip_address: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub port: Option<f64>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct VirtualMachineSshCredentials { #[serde(default, skip_serializing_if = "Option::is_none")] pub username: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub password: Option<String>, #[serde(rename = "publicKeyData", default, skip_serializing_if = "Option::is_none")] pub public_key_data: Option<String>, #[serde(rename = "privateKeyData", default, skip_serializing_if = "Option::is_none")] pub private_key_data: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ComputeSecrets { #[serde(rename = "computeType")] pub compute_type: ComputeType, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AksComputeSecrets { #[serde(flatten)] pub compute_secrets: ComputeSecrets, #[serde(flatten)] pub serde_json_value: serde_json::Value, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct VirtualMachineSecrets { #[serde(flatten)] pub compute_secrets: ComputeSecrets, #[serde(flatten)] pub serde_json_value: serde_json::Value, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub 
struct DatabricksComputeSecrets { #[serde(flatten)] pub compute_secrets: ComputeSecrets, #[serde(flatten)] pub serde_json_value: serde_json::Value, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ComputeType { #[serde(rename = "AKS")] Aks, AmlCompute, DataFactory, VirtualMachine, #[serde(rename = "HDInsight")] HdInsight, Databricks, DataLakeAnalytics, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct MachineLearningServiceError { #[serde(default, skip_serializing_if = "Option::is_none")] pub error: Option<ErrorResponse>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ErrorResponse { #[serde(default, skip_serializing_if = "Option::is_none")] pub code: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub message: Option<String>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub details: Vec<ErrorDetail>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ErrorDetail { pub code: String, pub message: String, }
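// A minimal deserialization sketch for these generated models (illustrative;
// the JSON body and value are hypothetical, but the `friendlyName` key matches
// the `#[serde(rename = ...)]` attribute on `WorkspaceProperties` above, and
// all `Resource` fields are optional, so they may be omitted):
//
//     let ws: Workspace =
//         serde_json::from_str(r#"{"properties": {"friendlyName": "my-ws"}}"#)?;
//     assert_eq!(
//         ws.properties.unwrap().friendly_name.as_deref(),
//         Some("my-ws")
//     );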
41.354508
104
0.708984
037e68331cfe76518d888c5e06b429461121ff6b
2,027
//! Adds support for capturing Sentry errors from `anyhow::Error`. //! //! # Example //! //! ```no_run //! # fn function_that_might_fail() -> anyhow::Result<()> { Ok(()) } //! use sentry_anyhow::capture_anyhow; //! # fn test() -> anyhow::Result<()> { //! let result = match function_that_might_fail() { //! Ok(result) => result, //! Err(err) => { //! capture_anyhow(&err); //! return Err(err); //! } //! }; //! # Ok(()) } //! ``` #![doc(html_favicon_url = "https://sentry-brand.storage.googleapis.com/favicon.ico")] #![doc(html_logo_url = "https://sentry-brand.storage.googleapis.com/sentry-glyph-black.png")] #![warn(missing_docs)] #![deny(unsafe_code)] use std::error::Error; use std::fmt; use sentry_core::types::Uuid; use sentry_core::Hub; /// Captures an `anyhow::Error`. /// /// See [module level documentation](index.html) for more information. pub fn capture_anyhow(e: &anyhow::Error) -> Uuid { Hub::with_active(|hub| hub.capture_anyhow(e)) } /// Hub extension methods for working with `anyhow`. pub trait AnyhowHubExt { /// Captures an `anyhow::Error` on a specific hub. fn capture_anyhow(&self, e: &anyhow::Error) -> Uuid; } impl AnyhowHubExt for Hub { fn capture_anyhow(&self, e: &anyhow::Error) -> Uuid { self.capture_error(&AnyhowError(e)) } } // `anyhow::Error` itself does not impl `std::error::Error`, because it would // be incoherent. This can be worked around by wrapping it in a newtype // which impls `std::error::Error`. // Code adapted from: https://github.com/dtolnay/anyhow/issues/63#issuecomment-590983511 struct AnyhowError<'a>(&'a anyhow::Error); impl fmt::Debug for AnyhowError<'_> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { self.0.fmt(fmt) } } impl fmt::Display for AnyhowError<'_> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { self.0.fmt(fmt) } } impl Error for AnyhowError<'_> { fn source(&self) -> Option<&(dyn Error + 'static)> { self.0.source() } }
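// An illustrative test (added as a sketch) showing that the newtype keeps the
// error chain reachable through `std::error::Error::source`, which is what
// the `capture_error` machinery walks.
#[cfg(test)]
mod newtype_tests {
    use super::AnyhowError;
    use std::error::Error;

    #[test]
    fn newtype_preserves_source_chain() {
        // Build a two-level chain: a context message over an io::Error.
        let io = std::io::Error::new(std::io::ErrorKind::Other, "inner");
        let err = anyhow::Error::from(io).context("outer");
        // The wrapper's `source()` forwards to the wrapped chain.
        assert!(AnyhowError(&err).source().is_some());
    }
}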
28.152778
93
0.633942
2f0faaf9aa2c56060dcbf392cb10bd7db5bac5a1
1,385
#[macro_use] extern crate log; extern crate env_logger; extern crate serenity; use serenity::prelude::*; use serenity::model::event::ResumedEvent; use serenity::model::gateway::Ready; use std::env; struct Handler; impl EventHandler for Handler { fn ready(&self, _: Context, ready: Ready) { // Log at the INFO level. This is a macro from the `log` crate. info!("{} is connected!", ready.user.name); } fn resume(&self, _: Context, resume: ResumedEvent) { // Log at the DEBUG level. // // In this example, this will not show up in the logs because DEBUG is // below INFO, the configured maximum log level. debug!("Resumed; trace: {:?}", resume.trace); } } fn main() { // Call env_logger's initialize function, which configures `log` via // environment variables. // // For example, you can log all levels INFO and up by setting the // environment variable `RUST_LOG` to `INFO`. env_logger::init().expect("Unable to init env_logger"); // Configure the client with your Discord bot token in the environment. let token = env::var("DISCORD_TOKEN") .expect("Expected a token in the environment"); let mut client = Client::new(&token, Handler).expect("Err creating client"); if let Err(why) = client.start() { error!("Client error: {:?}", why); } }
30.108696
80
0.640433
67201b2ecd3a1185a0d22933358ddf08d8e3e1af
931
use lib::run; use lib::rlib_run; use std::env; use std::error::Error; use std::ffi::CString; use std::os::raw::{c_char, c_int}; use std::ptr; fn main() { ffi_main(); } // For systems where the FFI is used. fn ffi_main() { let args: Vec<CString> = env::args() .map(|s| CString::new(s).expect("CString::new failed")) .collect(); let c_args: Vec<*const c_char> = args .iter() .map(|a| a.as_ptr()) .collect(); let argc: c_int = args.len() as c_int; let argv: *const *const c_char = c_args.as_ptr(); run(argc, argv); } // For systems where everything is written in Rust. fn _rlib_main() -> Result<(), Box<dyn Error>> { let env_args: Vec<String> = env::args().collect(); let args: Vec<&str> = env_args .iter() .map(|s| s.as_str()) .collect(); rlib_run(args) } // For systems where command line arguments do not make sense. fn _no_arg_ffi_main() { run(0, ptr::null()); }
21.159091
62
0.620838
64d718c214eec123d4cafbd1810f0811f9a8f5ed
5,096
#[doc = "Register `AUTOCOLRESCONFIG` reader"] pub struct R(crate::R<AUTOCOLRESCONFIG_SPEC>); impl core::ops::Deref for R { type Target = crate::R<AUTOCOLRESCONFIG_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<AUTOCOLRESCONFIG_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<AUTOCOLRESCONFIG_SPEC>) -> Self { R(reader) } } #[doc = "Register `AUTOCOLRESCONFIG` writer"] pub struct W(crate::W<AUTOCOLRESCONFIG_SPEC>); impl core::ops::Deref for W { type Target = crate::W<AUTOCOLRESCONFIG_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<crate::W<AUTOCOLRESCONFIG_SPEC>> for W { #[inline(always)] fn from(writer: crate::W<AUTOCOLRESCONFIG_SPEC>) -> Self { W(writer) } } #[doc = "Enables/disables auto collision resolution\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum MODE_A { #[doc = "0: Auto collision resolution enabled"] ENABLED = 0, #[doc = "1: Auto collision resolution disabled"] DISABLED = 1, } impl From<MODE_A> for bool { #[inline(always)] fn from(variant: MODE_A) -> Self { variant as u8 != 0 } } #[doc = "Field `MODE` reader - Enables/disables auto collision resolution"] pub struct MODE_R(crate::FieldReader<bool, MODE_A>); impl MODE_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { MODE_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> MODE_A { match self.bits { false => MODE_A::ENABLED, true => MODE_A::DISABLED, } } #[doc = "Checks if the value of the field is `ENABLED`"] #[inline(always)] pub fn is_enabled(&self) -> bool { **self == MODE_A::ENABLED } #[doc = "Checks if the value of the field is `DISABLED`"] #[inline(always)] pub fn is_disabled(&self) -> bool { **self == MODE_A::DISABLED } } impl core::ops::Deref for MODE_R { type Target = crate::FieldReader<bool, MODE_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `MODE` writer - Enables/disables auto collision resolution"] pub struct MODE_W<'a> { w: &'a mut W, } impl<'a> MODE_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: MODE_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Auto collision resolution enabled"] #[inline(always)] pub fn enabled(self) -> &'a mut W { self.variant(MODE_A::ENABLED) } #[doc = "Auto collision resolution disabled"] #[inline(always)] pub fn disabled(self) -> &'a mut W { self.variant(MODE_A::DISABLED) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | (value as u32 & 0x01); self.w } } impl R { #[doc = "Bit 0 - Enables/disables auto collision resolution"] #[inline(always)] pub fn mode(&self) -> MODE_R { MODE_R::new((self.bits & 0x01) != 0) } } impl W { #[doc = "Bit 0 - Enables/disables auto collision resolution"] #[inline(always)] pub fn mode(&mut self) -> MODE_W { MODE_W { w: self } } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "Controls the auto collision resolution function. 
This setting must be done before the NFCT peripheral is activated.\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [autocolresconfig](index.html) module"] pub struct AUTOCOLRESCONFIG_SPEC; impl crate::RegisterSpec for AUTOCOLRESCONFIG_SPEC { type Ux = u32; } #[doc = "`read()` method returns [autocolresconfig::R](R) reader structure"] impl crate::Readable for AUTOCOLRESCONFIG_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [autocolresconfig::W](W) writer structure"] impl crate::Writable for AUTOCOLRESCONFIG_SPEC { type Writer = W; } #[doc = "`reset()` method sets AUTOCOLRESCONFIG to value 0x02"] impl crate::Resettable for AUTOCOLRESCONFIG_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0x02 } }
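// An illustrative read-modify-write sequence for this register (a sketch;
// `nfct` stands in for whichever generated peripheral instance owns
// AUTOCOLRESCONFIG and must be obtained from the device's `Peripherals`):
//
//     // Disable auto collision resolution before activating the NFCT peripheral.
//     nfct.autocolresconfig.modify(|_, w| w.mode().disabled());
//     // Read the setting back.
//     let disabled = nfct.autocolresconfig.read().mode().is_disabled();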
31.652174
512
0.611068
6af6e76bc343eb311b78bb4a112c5804a9876e4a
965
#![allow(
    renamed_and_removed_lints,
    unknown_lints,
    clippy::unknown_clippy_lints,
    clippy::inconsistent_struct_constructor,
    clippy::manual_map,
    clippy::manual_range_contains,
    clippy::same_item_push,
    clippy::unnecessary_wraps
)]

#[macro_use]
mod display_object;

#[macro_use]
extern crate smallvec;
#[macro_use]
extern crate downcast_rs;

#[macro_use]
mod avm1;
mod avm2;
pub mod bitmap;
mod bounding_box;
mod character;
mod collect;
pub mod color_transform;
pub mod context;
mod drawing;
mod ecma_conversions;
pub mod events;
pub mod focus_tracker;
mod font;
mod html;
mod library;
pub mod loader;
mod player;
mod prelude;
pub mod property_map;
pub mod shape_utils;
pub mod string_utils;
pub mod tag_utils;
mod transform;
mod types;
mod vminterface;
mod xml;

pub mod backend;
pub mod config;
pub mod external;

pub use chrono;
pub use events::PlayerEvent;
pub use indexmap;
pub use player::Player;
pub use swf;
pub use swf::Color;
16.355932
44
0.761658
e6e1e69c742a2114d3468673289b14e0eec3678c
1,213
//! Implement Fun/Arity pair, printing, ordering etc
//!
use crate::{defs::Arity, term::value::*};
use core::{cmp::Ordering, fmt};

/// Reference to an internal function in some module.
#[derive(Debug, Eq, Clone, Hash)]
pub struct FunArity {
    pub f: Term,
    pub arity: Arity,
}

impl FunArity {
    // /// Create an uninitialized function pointer for deferred initialization.
    // pub fn new_uninit() -> FunArity {
    //     FunArity {
    //         f: Term::non_value(),
    //         arity: 0,
    //     }
    // }

    /// Create from a function name and arity.
    pub fn new(f: Term, arity: Arity) -> FunArity {
        FunArity { f, arity }
    }
}

impl Ord for FunArity {
    fn cmp(&self, other: &FunArity) -> Ordering {
        let fa = (self.f, self.arity);
        fa.cmp(&(other.f, other.arity))
    }
}

impl PartialOrd for FunArity {
    fn partial_cmp(&self, other: &FunArity) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl PartialEq for FunArity {
    fn eq(&self, other: &FunArity) -> bool {
        self.f == other.f && self.arity == other.arity
    }
}

// Print a FunArity as "fun/arity".
impl fmt::Display for FunArity {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}/{}", self.f, self.arity)
    }
}
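// Editorial sketch: the `Ord` impl above leans on Rust's lexicographic tuple
// comparison, so FunArity values sort by function name first and by arity
// second. A self-contained demonstration of the same pattern, kept in a test
// module so its names stay local (`u32`/`u16` are stand-ins for the
// VM-internal `Term` and `Arity` types):
#[cfg(test)]
mod ordering_sketch {
    use core::cmp::Ordering;

    // Compare (name, arity) pairs: the first element decides, ties fall
    // through to the second, exactly as in `FunArity::cmp`.
    fn cmp_pair(a: (u32, u16), b: (u32, u16)) -> Ordering {
        a.cmp(&b)
    }

    #[test]
    fn name_then_arity() {
        // Same "name" (1), so the lower arity sorts first...
        assert_eq!(cmp_pair((1, 2), (1, 3)), Ordering::Less);
        // ...but a smaller name wins regardless of arity.
        assert_eq!(cmp_pair((1, 9), (2, 0)), Ordering::Less);
    }
}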
22.886792
79
0.609233
dba4cc685f443d96165dcf0063aaf75e379e4bd8
197
#![no_main]
use trie_db_fuzz::fuzz_that_unhashed_no_extension;
use libfuzzer_sys::fuzz_target;

fuzz_target!(|data: &[u8]| {
    // Feed the raw fuzz input to the trie-building harness under test.
    fuzz_that_unhashed_no_extension(data);
});
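// Editorial note: a target like this is normally driven by
// `cargo fuzz run <target-name>`, which feeds mutated byte strings into the
// closure above. Because the harness is an ordinary `fn(&[u8])`, an input
// found by the fuzzer can also be replayed deterministically, e.g. from a
// unit test inside the `trie_db_fuzz` crate. A sketch (the byte string is an
// arbitrary placeholder, not a known crash input):
//
//     #[test]
//     fn replay_fixed_input() {
//         trie_db_fuzz::fuzz_that_unhashed_no_extension(&[0x00, 0x01, 0xFF, 0x7F]);
//     }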
17.909091
50
0.761421
566f344cff1b4e84749af6d82d0b06a3c14280a1
94,344
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use integer_encoding::{VarIntReader, VarIntWriter}; use std::convert::{From, TryFrom}; use std::io; use super::{ TFieldIdentifier, TInputProtocol, TInputProtocolFactory, TListIdentifier, TMapIdentifier, TMessageIdentifier, TMessageType, }; use super::{TOutputProtocol, TOutputProtocolFactory, TSetIdentifier, TStructIdentifier, TType}; use crate::transport::{TReadTransport, TWriteTransport}; const COMPACT_PROTOCOL_ID: u8 = 0x82; const COMPACT_VERSION: u8 = 0x01; const COMPACT_VERSION_MASK: u8 = 0x1F; /// Read messages encoded in the Thrift compact protocol. /// /// # Examples /// /// Create and use a `TCompactInputProtocol`. /// /// ```no_run /// use thrift::protocol::{TCompactInputProtocol, TInputProtocol}; /// use thrift::transport::TTcpChannel; /// /// let mut channel = TTcpChannel::new(); /// channel.open("localhost:9090").unwrap(); /// /// let mut protocol = TCompactInputProtocol::new(channel); /// /// let recvd_bool = protocol.read_bool().unwrap(); /// let recvd_string = protocol.read_string().unwrap(); /// ``` #[derive(Debug)] pub struct TCompactInputProtocol<T> where T: TReadTransport, { // Identifier of the last field deserialized for a struct. last_read_field_id: i16, // Stack of the last read field ids (a new entry is added each time a nested struct is read). read_field_id_stack: Vec<i16>, // Boolean value for a field. // Saved because boolean fields and their value are encoded in a single byte, // and reading the field only occurs after the field id is read. pending_read_bool_value: Option<bool>, // Underlying transport used for byte-level operations. transport: T, } impl<T> TCompactInputProtocol<T> where T: TReadTransport, { /// Create a `TCompactInputProtocol` that reads bytes from `transport`. pub fn new(transport: T) -> TCompactInputProtocol<T> { TCompactInputProtocol { last_read_field_id: 0, read_field_id_stack: Vec::new(), pending_read_bool_value: None, transport, } } fn read_list_set_begin(&mut self) -> crate::Result<(TType, i32)> { let header = self.read_byte()?; let element_type = collection_u8_to_type(header & 0x0F)?; let element_count; let possible_element_count = (header & 0xF0) >> 4; if possible_element_count != 15 { // high bits set high if count and type encoded separately element_count = possible_element_count as i32; } else { element_count = self.transport.read_varint::<u32>()? 
as i32; } Ok((element_type, element_count)) } } impl<T> TInputProtocol for TCompactInputProtocol<T> where T: TReadTransport, { fn read_message_begin(&mut self) -> crate::Result<TMessageIdentifier> { let compact_id = self.read_byte()?; if compact_id != COMPACT_PROTOCOL_ID { Err(crate::Error::Protocol(crate::ProtocolError { kind: crate::ProtocolErrorKind::BadVersion, message: format!("invalid compact protocol header {:?}", compact_id), })) } else { Ok(()) }?; let type_and_byte = self.read_byte()?; let received_version = type_and_byte & COMPACT_VERSION_MASK; if received_version != COMPACT_VERSION { Err(crate::Error::Protocol(crate::ProtocolError { kind: crate::ProtocolErrorKind::BadVersion, message: format!( "cannot process compact protocol version {:?}", received_version ), })) } else { Ok(()) }?; // NOTE: unsigned right shift will pad with 0s let message_type: TMessageType = TMessageType::try_from(type_and_byte >> 5)?; // writing side wrote signed sequence number as u32 to avoid zigzag encoding let sequence_number = self.transport.read_varint::<u32>()? as i32; let service_call_name = self.read_string()?; self.last_read_field_id = 0; Ok(TMessageIdentifier::new( service_call_name, message_type, sequence_number, )) } fn read_message_end(&mut self) -> crate::Result<()> { Ok(()) } fn read_struct_begin(&mut self) -> crate::Result<Option<TStructIdentifier>> { self.read_field_id_stack.push(self.last_read_field_id); self.last_read_field_id = 0; Ok(None) } fn read_struct_end(&mut self) -> crate::Result<()> { self.last_read_field_id = self .read_field_id_stack .pop() .expect("should have previous field ids"); Ok(()) } fn read_field_begin(&mut self) -> crate::Result<TFieldIdentifier> { // we can read at least one byte, which is: // - the type // - the field delta and the type let field_type = self.read_byte()?; let field_delta = (field_type & 0xF0) >> 4; let field_type = match field_type & 0x0F { 0x01 => { self.pending_read_bool_value = Some(true); Ok(TType::Bool) } 0x02 => { self.pending_read_bool_value = Some(false); Ok(TType::Bool) } ttu8 => u8_to_type(ttu8), }?; match field_type { TType::Stop => Ok( TFieldIdentifier::new::<Option<String>, String, Option<i16>>( None, TType::Stop, None, ), ), _ => { if field_delta != 0 { self.last_read_field_id += field_delta as i16; } else { self.last_read_field_id = self.read_i16()?; }; Ok(TFieldIdentifier { name: None, field_type, id: Some(self.last_read_field_id), }) } } } fn read_field_end(&mut self) -> crate::Result<()> { Ok(()) } fn read_bool(&mut self) -> crate::Result<bool> { match self.pending_read_bool_value.take() { Some(b) => Ok(b), None => { let b = self.read_byte()?; match b { 0x01 => Ok(true), 0x02 => Ok(false), unkn => Err(crate::Error::Protocol(crate::ProtocolError { kind: crate::ProtocolErrorKind::InvalidData, message: format!("cannot convert {} into bool", unkn), })), } } } } fn read_bytes(&mut self) -> crate::Result<Vec<u8>> { let len = self.transport.read_varint::<u32>()?; let mut buf = vec![0u8; len as usize]; self.transport .read_exact(&mut buf) .map_err(From::from) .map(|_| buf) } fn read_i8(&mut self) -> crate::Result<i8> { self.read_byte().map(|i| i as i8) } fn read_i16(&mut self) -> crate::Result<i16> { self.transport.read_varint::<i16>().map_err(From::from) } fn read_i32(&mut self) -> crate::Result<i32> { self.transport.read_varint::<i32>().map_err(From::from) } fn read_i64(&mut self) -> crate::Result<i64> { self.transport.read_varint::<i64>().map_err(From::from) } fn read_double(&mut self) -> crate::Result<f64> { self.transport 
.read_f64::<LittleEndian>() .map_err(From::from) } fn read_string(&mut self) -> crate::Result<String> { let bytes = self.read_bytes()?; String::from_utf8(bytes).map_err(From::from) } fn read_list_begin(&mut self) -> crate::Result<TListIdentifier> { let (element_type, element_count) = self.read_list_set_begin()?; Ok(TListIdentifier::new(element_type, element_count)) } fn read_list_end(&mut self) -> crate::Result<()> { Ok(()) } fn read_set_begin(&mut self) -> crate::Result<TSetIdentifier> { let (element_type, element_count) = self.read_list_set_begin()?; Ok(TSetIdentifier::new(element_type, element_count)) } fn read_set_end(&mut self) -> crate::Result<()> { Ok(()) } fn read_map_begin(&mut self) -> crate::Result<TMapIdentifier> { let element_count = self.transport.read_varint::<u32>()? as i32; if element_count == 0 { Ok(TMapIdentifier::new(None, None, 0)) } else { let type_header = self.read_byte()?; let key_type = collection_u8_to_type((type_header & 0xF0) >> 4)?; let val_type = collection_u8_to_type(type_header & 0x0F)?; Ok(TMapIdentifier::new(key_type, val_type, element_count)) } } fn read_map_end(&mut self) -> crate::Result<()> { Ok(()) } // utility // fn read_byte(&mut self) -> crate::Result<u8> { let mut buf = [0u8; 1]; self.transport .read_exact(&mut buf) .map_err(From::from) .map(|_| buf[0]) } } impl<T> io::Seek for TCompactInputProtocol<T> where T: io::Seek + TReadTransport, { fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> { self.transport.seek(pos) } } /// Factory for creating instances of `TCompactInputProtocol`. #[derive(Default)] pub struct TCompactInputProtocolFactory; impl TCompactInputProtocolFactory { /// Create a `TCompactInputProtocolFactory`. pub fn new() -> TCompactInputProtocolFactory { TCompactInputProtocolFactory {} } } impl TInputProtocolFactory for TCompactInputProtocolFactory { fn create(&self, transport: Box<dyn TReadTransport + Send>) -> Box<dyn TInputProtocol + Send> { Box::new(TCompactInputProtocol::new(transport)) } } /// Write messages using the Thrift compact protocol. /// /// # Examples /// /// Create and use a `TCompactOutputProtocol`. /// /// ```no_run /// use thrift::protocol::{TCompactOutputProtocol, TOutputProtocol}; /// use thrift::transport::TTcpChannel; /// /// let mut channel = TTcpChannel::new(); /// channel.open("localhost:9090").unwrap(); /// /// let mut protocol = TCompactOutputProtocol::new(channel); /// /// protocol.write_bool(true).unwrap(); /// protocol.write_string("test_string").unwrap(); /// ``` #[derive(Debug)] pub struct TCompactOutputProtocol<T> where T: TWriteTransport, { // Identifier of the last field serialized for a struct. last_write_field_id: i16, // Stack of the last written field ids (new entry added each time a nested struct is written). write_field_id_stack: Vec<i16>, // Field identifier of the boolean field to be written. // Saved because boolean fields and their value are encoded in a single byte pending_write_bool_field_identifier: Option<TFieldIdentifier>, // Underlying transport used for byte-level operations. transport: T, } impl<T> TCompactOutputProtocol<T> where T: TWriteTransport, { /// Create a `TCompactOutputProtocol` that writes bytes to `transport`. 
pub fn new(transport: T) -> TCompactOutputProtocol<T> { TCompactOutputProtocol { last_write_field_id: 0, write_field_id_stack: Vec::new(), pending_write_bool_field_identifier: None, transport, } } // FIXME: field_type as unconstrained u8 is bad fn write_field_header(&mut self, field_type: u8, field_id: i16) -> crate::Result<()> { let field_delta = field_id - self.last_write_field_id; if field_delta > 0 && field_delta < 15 { self.write_byte(((field_delta as u8) << 4) | field_type)?; } else { self.write_byte(field_type)?; self.write_i16(field_id)?; } self.last_write_field_id = field_id; Ok(()) } fn write_list_set_begin( &mut self, element_type: TType, element_count: i32, ) -> crate::Result<()> { let elem_identifier = collection_type_to_u8(element_type); if element_count <= 14 { let header = (element_count as u8) << 4 | elem_identifier; self.write_byte(header) } else { let header = 0xF0 | elem_identifier; self.write_byte(header)?; // element count is strictly positive as per the spec, so // cast i32 as u32 so that varint writing won't use zigzag encoding self.transport .write_varint(element_count as u32) .map_err(From::from) .map(|_| ()) } } fn assert_no_pending_bool_write(&self) { if let Some(ref f) = self.pending_write_bool_field_identifier { panic!("pending bool field {:?} not written", f) } } } impl<T> TOutputProtocol for TCompactOutputProtocol<T> where T: TWriteTransport, { fn write_message_begin(&mut self, identifier: &TMessageIdentifier) -> crate::Result<()> { self.write_byte(COMPACT_PROTOCOL_ID)?; self.write_byte((u8::from(identifier.message_type) << 5) | COMPACT_VERSION)?; // cast i32 as u32 so that varint writing won't use zigzag encoding self.transport .write_varint(identifier.sequence_number as u32)?; self.write_string(&identifier.name)?; Ok(()) } fn write_message_end(&mut self) -> crate::Result<()> { self.assert_no_pending_bool_write(); Ok(()) } fn write_struct_begin(&mut self, _: &TStructIdentifier) -> crate::Result<()> { self.write_field_id_stack.push(self.last_write_field_id); self.last_write_field_id = 0; Ok(()) } fn write_struct_end(&mut self) -> crate::Result<()> { self.assert_no_pending_bool_write(); self.last_write_field_id = self .write_field_id_stack .pop() .expect("should have previous field ids"); Ok(()) } fn write_field_begin(&mut self, identifier: &TFieldIdentifier) -> crate::Result<()> { match identifier.field_type { TType::Bool => { if self.pending_write_bool_field_identifier.is_some() { panic!( "should not have a pending bool while writing another bool with id: \ {:?}", identifier ) } self.pending_write_bool_field_identifier = Some(identifier.clone()); Ok(()) } _ => { let field_type = type_to_u8(identifier.field_type); let field_id = identifier.id.expect("non-stop field should have field id"); self.write_field_header(field_type, field_id) } } } fn write_field_end(&mut self) -> crate::Result<()> { self.assert_no_pending_bool_write(); Ok(()) } fn write_field_stop(&mut self) -> crate::Result<()> { self.assert_no_pending_bool_write(); self.write_byte(type_to_u8(TType::Stop)) } fn write_bool(&mut self, b: bool) -> crate::Result<()> { match self.pending_write_bool_field_identifier.take() { Some(pending) => { let field_id = pending.id.expect("bool field should have a field id"); let field_type_as_u8 = if b { 0x01 } else { 0x02 }; self.write_field_header(field_type_as_u8, field_id) } None => { if b { self.write_byte(0x01) } else { self.write_byte(0x02) } } } } fn write_bytes(&mut self, b: &[u8]) -> crate::Result<()> { // length is strictly positive as per the spec, so // 
cast i32 as u32 so that varint writing won't use zigzag encoding self.transport.write_varint(b.len() as u32)?; self.transport.write_all(b).map_err(From::from) } fn write_i8(&mut self, i: i8) -> crate::Result<()> { self.write_byte(i as u8) } fn write_i16(&mut self, i: i16) -> crate::Result<()> { self.transport .write_varint(i) .map_err(From::from) .map(|_| ()) } fn write_i32(&mut self, i: i32) -> crate::Result<()> { self.transport .write_varint(i) .map_err(From::from) .map(|_| ()) } fn write_i64(&mut self, i: i64) -> crate::Result<()> { self.transport .write_varint(i) .map_err(From::from) .map(|_| ()) } fn write_double(&mut self, d: f64) -> crate::Result<()> { self.transport .write_f64::<LittleEndian>(d) .map_err(From::from) } fn write_string(&mut self, s: &str) -> crate::Result<()> { self.write_bytes(s.as_bytes()) } fn write_list_begin(&mut self, identifier: &TListIdentifier) -> crate::Result<()> { self.write_list_set_begin(identifier.element_type, identifier.size) } fn write_list_end(&mut self) -> crate::Result<()> { Ok(()) } fn write_set_begin(&mut self, identifier: &TSetIdentifier) -> crate::Result<()> { self.write_list_set_begin(identifier.element_type, identifier.size) } fn write_set_end(&mut self) -> crate::Result<()> { Ok(()) } fn write_map_begin(&mut self, identifier: &TMapIdentifier) -> crate::Result<()> { if identifier.size == 0 { self.write_byte(0) } else { // element count is strictly positive as per the spec, so // cast i32 as u32 so that varint writing won't use zigzag encoding self.transport.write_varint(identifier.size as u32)?; let key_type = identifier .key_type .expect("map identifier to write should contain key type"); let key_type_byte = collection_type_to_u8(key_type) << 4; let val_type = identifier .value_type .expect("map identifier to write should contain value type"); let val_type_byte = collection_type_to_u8(val_type); let map_type_header = key_type_byte | val_type_byte; self.write_byte(map_type_header) } } fn write_map_end(&mut self) -> crate::Result<()> { Ok(()) } fn flush(&mut self) -> crate::Result<()> { self.transport.flush().map_err(From::from) } // utility // fn write_byte(&mut self, b: u8) -> crate::Result<()> { self.transport.write(&[b]).map_err(From::from).map(|_| ()) } } /// Factory for creating instances of `TCompactOutputProtocol`. #[derive(Default)] pub struct TCompactOutputProtocolFactory; impl TCompactOutputProtocolFactory { /// Create a `TCompactOutputProtocolFactory`. 
pub fn new() -> TCompactOutputProtocolFactory { TCompactOutputProtocolFactory {} } } impl TOutputProtocolFactory for TCompactOutputProtocolFactory { fn create( &self, transport: Box<dyn TWriteTransport + Send>, ) -> Box<dyn TOutputProtocol + Send> { Box::new(TCompactOutputProtocol::new(transport)) } } fn collection_type_to_u8(field_type: TType) -> u8 { match field_type { TType::Bool => 0x01, f => type_to_u8(f), } } fn type_to_u8(field_type: TType) -> u8 { match field_type { TType::Stop => 0x00, TType::I08 => 0x03, // equivalent to TType::Byte TType::I16 => 0x04, TType::I32 => 0x05, TType::I64 => 0x06, TType::Double => 0x07, TType::String => 0x08, TType::List => 0x09, TType::Set => 0x0A, TType::Map => 0x0B, TType::Struct => 0x0C, _ => panic!("should not have attempted to convert {} to u8", field_type), } } fn collection_u8_to_type(b: u8) -> crate::Result<TType> { match b { 0x01 => Ok(TType::Bool), o => u8_to_type(o), } } fn u8_to_type(b: u8) -> crate::Result<TType> { match b { 0x00 => Ok(TType::Stop), 0x03 => Ok(TType::I08), // equivalent to TType::Byte 0x04 => Ok(TType::I16), 0x05 => Ok(TType::I32), 0x06 => Ok(TType::I64), 0x07 => Ok(TType::Double), 0x08 => Ok(TType::String), 0x09 => Ok(TType::List), 0x0A => Ok(TType::Set), 0x0B => Ok(TType::Map), 0x0C => Ok(TType::Struct), unkn => Err(crate::Error::Protocol(crate::ProtocolError { kind: crate::ProtocolErrorKind::InvalidData, message: format!("cannot convert {} into TType", unkn), })), } } #[cfg(test)] mod tests { use std::i32; use crate::protocol::{ TFieldIdentifier, TInputProtocol, TListIdentifier, TMapIdentifier, TMessageIdentifier, TMessageType, TOutputProtocol, TSetIdentifier, TStructIdentifier, TType, }; use crate::transport::{ReadHalf, TBufferChannel, TIoChannel, WriteHalf}; use super::*; #[test] fn must_write_message_begin_largest_maximum_positive_sequence_number() { let (_, mut o_prot) = test_objects(); assert_success!(o_prot.write_message_begin(&TMessageIdentifier::new( "bar", TMessageType::Reply, i32::MAX ))); #[rustfmt::skip] let expected: [u8; 11] = [ 0x82, /* protocol ID */ 0x41, /* message type | protocol version */ 0xFF, 0xFF, 0xFF, 0xFF, 0x07, /* non-zig-zag varint sequence number */ 0x03, /* message-name length */ 0x62, 0x61, 0x72 /* "bar" */, ]; assert_eq_written_bytes!(o_prot, expected); } #[test] fn must_read_message_begin_largest_maximum_positive_sequence_number() { let (mut i_prot, _) = test_objects(); #[rustfmt::skip] let source_bytes: [u8; 11] = [ 0x82, /* protocol ID */ 0x41, /* message type | protocol version */ 0xFF, 0xFF, 0xFF, 0xFF, 0x07, /* non-zig-zag varint sequence number */ 0x03, /* message-name length */ 0x62, 0x61, 0x72 /* "bar" */, ]; i_prot.transport.set_readable_bytes(&source_bytes); let expected = TMessageIdentifier::new("bar", TMessageType::Reply, i32::MAX); let res = assert_success!(i_prot.read_message_begin()); assert_eq!(&expected, &res); } #[test] fn must_write_message_begin_positive_sequence_number_0() { let (_, mut o_prot) = test_objects(); assert_success!(o_prot.write_message_begin(&TMessageIdentifier::new( "foo", TMessageType::Call, 431 ))); #[rustfmt::skip] let expected: [u8; 8] = [ 0x82, /* protocol ID */ 0x21, /* message type | protocol version */ 0xAF, 0x03, /* non-zig-zag varint sequence number */ 0x03, /* message-name length */ 0x66, 0x6F, 0x6F /* "foo" */, ]; assert_eq_written_bytes!(o_prot, expected); } #[test] fn must_read_message_begin_positive_sequence_number_0() { let (mut i_prot, _) = test_objects(); #[rustfmt::skip] let source_bytes: [u8; 8] = [ 0x82, /* protocol ID */ 0x21, /* 
message type | protocol version */ 0xAF, 0x03, /* non-zig-zag varint sequence number */ 0x03, /* message-name length */ 0x66, 0x6F, 0x6F /* "foo" */, ]; i_prot.transport.set_readable_bytes(&source_bytes); let expected = TMessageIdentifier::new("foo", TMessageType::Call, 431); let res = assert_success!(i_prot.read_message_begin()); assert_eq!(&expected, &res); } #[test] fn must_write_message_begin_positive_sequence_number_1() { let (_, mut o_prot) = test_objects(); assert_success!(o_prot.write_message_begin(&TMessageIdentifier::new( "bar", TMessageType::Reply, 991_828 ))); #[rustfmt::skip] let expected: [u8; 9] = [ 0x82, /* protocol ID */ 0x41, /* message type | protocol version */ 0xD4, 0xC4, 0x3C, /* non-zig-zag varint sequence number */ 0x03, /* message-name length */ 0x62, 0x61, 0x72 /* "bar" */, ]; assert_eq_written_bytes!(o_prot, expected); } #[test] fn must_read_message_begin_positive_sequence_number_1() { let (mut i_prot, _) = test_objects(); #[rustfmt::skip] let source_bytes: [u8; 9] = [ 0x82, /* protocol ID */ 0x41, /* message type | protocol version */ 0xD4, 0xC4, 0x3C, /* non-zig-zag varint sequence number */ 0x03, /* message-name length */ 0x62, 0x61, 0x72 /* "bar" */, ]; i_prot.transport.set_readable_bytes(&source_bytes); let expected = TMessageIdentifier::new("bar", TMessageType::Reply, 991_828); let res = assert_success!(i_prot.read_message_begin()); assert_eq!(&expected, &res); } #[test] fn must_write_message_begin_zero_sequence_number() { let (_, mut o_prot) = test_objects(); assert_success!(o_prot.write_message_begin(&TMessageIdentifier::new( "bar", TMessageType::Reply, 0 ))); #[rustfmt::skip] let expected: [u8; 7] = [ 0x82, /* protocol ID */ 0x41, /* message type | protocol version */ 0x00, /* non-zig-zag varint sequence number */ 0x03, /* message-name length */ 0x62, 0x61, 0x72 /* "bar" */, ]; assert_eq_written_bytes!(o_prot, expected); } #[test] fn must_read_message_begin_zero_sequence_number() { let (mut i_prot, _) = test_objects(); #[rustfmt::skip] let source_bytes: [u8; 7] = [ 0x82, /* protocol ID */ 0x41, /* message type | protocol version */ 0x00, /* non-zig-zag varint sequence number */ 0x03, /* message-name length */ 0x62, 0x61, 0x72 /* "bar" */, ]; i_prot.transport.set_readable_bytes(&source_bytes); let expected = TMessageIdentifier::new("bar", TMessageType::Reply, 0); let res = assert_success!(i_prot.read_message_begin()); assert_eq!(&expected, &res); } #[test] fn must_write_message_begin_largest_minimum_negative_sequence_number() { let (_, mut o_prot) = test_objects(); assert_success!(o_prot.write_message_begin(&TMessageIdentifier::new( "bar", TMessageType::Reply, i32::MIN ))); // two's complement notation of i32::MIN = 1000_0000_0000_0000_0000_0000_0000_0000 #[rustfmt::skip] let expected: [u8; 11] = [ 0x82, /* protocol ID */ 0x41, /* message type | protocol version */ 0x80, 0x80, 0x80, 0x80, 0x08, /* non-zig-zag varint sequence number */ 0x03, /* message-name length */ 0x62, 0x61, 0x72 /* "bar" */, ]; assert_eq_written_bytes!(o_prot, expected); } #[test] fn must_read_message_begin_largest_minimum_negative_sequence_number() { let (mut i_prot, _) = test_objects(); // two's complement notation of i32::MIN = 1000_0000_0000_0000_0000_0000_0000_0000 #[rustfmt::skip] let source_bytes: [u8; 11] = [ 0x82, /* protocol ID */ 0x41, /* message type | protocol version */ 0x80, 0x80, 0x80, 0x80, 0x08, /* non-zig-zag varint sequence number */ 0x03, /* message-name length */ 0x62, 0x61, 0x72 /* "bar" */, ]; i_prot.transport.set_readable_bytes(&source_bytes); let expected = 
TMessageIdentifier::new("bar", TMessageType::Reply, i32::MIN); let res = assert_success!(i_prot.read_message_begin()); assert_eq!(&expected, &res); } #[test] fn must_write_message_begin_negative_sequence_number_0() { let (_, mut o_prot) = test_objects(); assert_success!(o_prot.write_message_begin(&TMessageIdentifier::new( "foo", TMessageType::Call, -431 ))); // signed two's complement of -431 = 1111_1111_1111_1111_1111_1110_0101_0001 #[rustfmt::skip] let expected: [u8; 11] = [ 0x82, /* protocol ID */ 0x21, /* message type | protocol version */ 0xD1, 0xFC, 0xFF, 0xFF, 0x0F, /* non-zig-zag varint sequence number */ 0x03, /* message-name length */ 0x66, 0x6F, 0x6F /* "foo" */, ]; assert_eq_written_bytes!(o_prot, expected); } #[test] fn must_read_message_begin_negative_sequence_number_0() { let (mut i_prot, _) = test_objects(); // signed two's complement of -431 = 1111_1111_1111_1111_1111_1110_0101_0001 #[rustfmt::skip] let source_bytes: [u8; 11] = [ 0x82, /* protocol ID */ 0x21, /* message type | protocol version */ 0xD1, 0xFC, 0xFF, 0xFF, 0x0F, /* non-zig-zag varint sequence number */ 0x03, /* message-name length */ 0x66, 0x6F, 0x6F /* "foo" */, ]; i_prot.transport.set_readable_bytes(&source_bytes); let expected = TMessageIdentifier::new("foo", TMessageType::Call, -431); let res = assert_success!(i_prot.read_message_begin()); assert_eq!(&expected, &res); } #[test] fn must_write_message_begin_negative_sequence_number_1() { let (_, mut o_prot) = test_objects(); assert_success!(o_prot.write_message_begin(&TMessageIdentifier::new( "foo", TMessageType::Call, -73_184_125 ))); // signed two's complement of -73184125 = 1111_1011_1010_0011_0100_1100_1000_0011 #[rustfmt::skip] let expected: [u8; 11] = [ 0x82, /* protocol ID */ 0x21, /* message type | protocol version */ 0x83, 0x99, 0x8D, 0xDD, 0x0F, /* non-zig-zag varint sequence number */ 0x03, /* message-name length */ 0x66, 0x6F, 0x6F /* "foo" */, ]; assert_eq_written_bytes!(o_prot, expected); } #[test] fn must_read_message_begin_negative_sequence_number_1() { let (mut i_prot, _) = test_objects(); // signed two's complement of -73184125 = 1111_1011_1010_0011_0100_1100_1000_0011 #[rustfmt::skip] let source_bytes: [u8; 11] = [ 0x82, /* protocol ID */ 0x21, /* message type | protocol version */ 0x83, 0x99, 0x8D, 0xDD, 0x0F, /* non-zig-zag varint sequence number */ 0x03, /* message-name length */ 0x66, 0x6F, 0x6F /* "foo" */, ]; i_prot.transport.set_readable_bytes(&source_bytes); let expected = TMessageIdentifier::new("foo", TMessageType::Call, -73_184_125); let res = assert_success!(i_prot.read_message_begin()); assert_eq!(&expected, &res); } #[test] fn must_write_message_begin_negative_sequence_number_2() { let (_, mut o_prot) = test_objects(); assert_success!(o_prot.write_message_begin(&TMessageIdentifier::new( "foo", TMessageType::Call, -1_073_741_823 ))); // signed two's complement of -1073741823 = 1100_0000_0000_0000_0000_0000_0000_0001 #[rustfmt::skip] let expected: [u8; 11] = [ 0x82, /* protocol ID */ 0x21, /* message type | protocol version */ 0x81, 0x80, 0x80, 0x80, 0x0C, /* non-zig-zag varint sequence number */ 0x03, /* message-name length */ 0x66, 0x6F, 0x6F /* "foo" */, ]; assert_eq_written_bytes!(o_prot, expected); } #[test] fn must_read_message_begin_negative_sequence_number_2() { let (mut i_prot, _) = test_objects(); // signed two's complement of -1073741823 = 1100_0000_0000_0000_0000_0000_0000_0001 #[rustfmt::skip] let source_bytes: [u8; 11] = [ 0x82, /* protocol ID */ 0x21, /* message type | protocol version */ 0x81, 0x80, 0x80, 0x80, 
0x0C, /* non-zig-zag varint sequence number */ 0x03, /* message-name length */ 0x66, 0x6F, 0x6F, /* "foo" */ ]; i_prot.transport.set_readable_bytes(&source_bytes); let expected = TMessageIdentifier::new("foo", TMessageType::Call, -1_073_741_823); let res = assert_success!(i_prot.read_message_begin()); assert_eq!(&expected, &res); } #[test] fn must_round_trip_upto_i64_maxvalue() { // See https://issues.apache.org/jira/browse/THRIFT-5131 for i in 0..64 { let (mut i_prot, mut o_prot) = test_objects(); let val: i64 = ((1u64 << i) - 1) as i64; o_prot .write_field_begin(&TFieldIdentifier::new("val", TType::I64, 1)) .unwrap(); o_prot.write_i64(val).unwrap(); o_prot.write_field_end().unwrap(); o_prot.flush().unwrap(); copy_write_buffer_to_read_buffer!(o_prot); i_prot.read_field_begin().unwrap(); assert_eq!(val, i_prot.read_i64().unwrap()); } } #[test] fn must_round_trip_message_begin() { let (mut i_prot, mut o_prot) = test_objects(); let ident = TMessageIdentifier::new("service_call", TMessageType::Call, 1_283_948); assert_success!(o_prot.write_message_begin(&ident)); copy_write_buffer_to_read_buffer!(o_prot); let res = assert_success!(i_prot.read_message_begin()); assert_eq!(&res, &ident); } #[test] fn must_write_message_end() { assert_no_write(|o| o.write_message_end()); } // NOTE: structs and fields are tested together // #[test] fn must_write_struct_with_delta_fields() { let (_, mut o_prot) = test_objects(); // no bytes should be written however assert_success!(o_prot.write_struct_begin(&TStructIdentifier::new("foo"))); // write three fields with tiny field ids // since they're small the field ids will be encoded as deltas // since this is the first field (and it's zero) it gets the full varint write assert_success!(o_prot.write_field_begin(&TFieldIdentifier::new("foo", TType::I08, 0))); assert_success!(o_prot.write_field_end()); // since this delta > 0 and < 15 it can be encoded as a delta assert_success!(o_prot.write_field_begin(&TFieldIdentifier::new("foo", TType::I16, 4))); assert_success!(o_prot.write_field_end()); // since this delta > 0 and < 15 it can be encoded as a delta assert_success!(o_prot.write_field_begin(&TFieldIdentifier::new("foo", TType::List, 9))); assert_success!(o_prot.write_field_end()); // now, finish the struct off assert_success!(o_prot.write_field_stop()); assert_success!(o_prot.write_struct_end()); #[rustfmt::skip] let expected: [u8; 5] = [ 0x03, /* field type */ 0x00, /* first field id */ 0x44, /* field delta (4) | field type */ 0x59, /* field delta (5) | field type */ 0x00 /* field stop */, ]; assert_eq_written_bytes!(o_prot, expected); } #[test] fn must_round_trip_struct_with_delta_fields() { let (mut i_prot, mut o_prot) = test_objects(); // no bytes should be written however assert_success!(o_prot.write_struct_begin(&TStructIdentifier::new("foo"))); // write three fields with tiny field ids // since they're small the field ids will be encoded as deltas // since this is the first field (and it's zero) it gets the full varint write let field_ident_1 = TFieldIdentifier::new("foo", TType::I08, 0); assert_success!(o_prot.write_field_begin(&field_ident_1)); assert_success!(o_prot.write_field_end()); // since this delta > 0 and < 15 it can be encoded as a delta let field_ident_2 = TFieldIdentifier::new("foo", TType::I16, 4); assert_success!(o_prot.write_field_begin(&field_ident_2)); assert_success!(o_prot.write_field_end()); // since this delta > 0 and < 15 it can be encoded as a delta let field_ident_3 = TFieldIdentifier::new("foo", TType::List, 9); 
assert_success!(o_prot.write_field_begin(&field_ident_3)); assert_success!(o_prot.write_field_end()); // now, finish the struct off assert_success!(o_prot.write_field_stop()); assert_success!(o_prot.write_struct_end()); copy_write_buffer_to_read_buffer!(o_prot); // read the struct back assert_success!(i_prot.read_struct_begin()); let read_ident_1 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_1, TFieldIdentifier { name: None, ..field_ident_1 } ); assert_success!(i_prot.read_field_end()); let read_ident_2 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_2, TFieldIdentifier { name: None, ..field_ident_2 } ); assert_success!(i_prot.read_field_end()); let read_ident_3 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_3, TFieldIdentifier { name: None, ..field_ident_3 } ); assert_success!(i_prot.read_field_end()); let read_ident_4 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_4, TFieldIdentifier { name: None, field_type: TType::Stop, id: None, } ); assert_success!(i_prot.read_struct_end()); } #[test] fn must_write_struct_with_non_zero_initial_field_and_delta_fields() { let (_, mut o_prot) = test_objects(); // no bytes should be written however assert_success!(o_prot.write_struct_begin(&TStructIdentifier::new("foo"))); // write three fields with tiny field ids // since they're small the field ids will be encoded as deltas // gets a delta write assert_success!(o_prot.write_field_begin(&TFieldIdentifier::new("foo", TType::I32, 1))); assert_success!(o_prot.write_field_end()); // since this delta > 0 and < 15 it can be encoded as a delta assert_success!(o_prot.write_field_begin(&TFieldIdentifier::new("foo", TType::Set, 2))); assert_success!(o_prot.write_field_end()); // since this delta > 0 and < 15 it can be encoded as a delta assert_success!(o_prot.write_field_begin(&TFieldIdentifier::new("foo", TType::String, 6))); assert_success!(o_prot.write_field_end()); // now, finish the struct off assert_success!(o_prot.write_field_stop()); assert_success!(o_prot.write_struct_end()); #[rustfmt::skip] let expected: [u8; 4] = [ 0x15, /* field delta (1) | field type */ 0x1A, /* field delta (1) | field type */ 0x48, /* field delta (4) | field type */ 0x00 /* field stop */, ]; assert_eq_written_bytes!(o_prot, expected); } #[test] fn must_round_trip_struct_with_non_zero_initial_field_and_delta_fields() { let (mut i_prot, mut o_prot) = test_objects(); // no bytes should be written however assert_success!(o_prot.write_struct_begin(&TStructIdentifier::new("foo"))); // write three fields with tiny field ids // since they're small the field ids will be encoded as deltas // gets a delta write let field_ident_1 = TFieldIdentifier::new("foo", TType::I32, 1); assert_success!(o_prot.write_field_begin(&field_ident_1)); assert_success!(o_prot.write_field_end()); // since this delta > 0 and < 15 it can be encoded as a delta let field_ident_2 = TFieldIdentifier::new("foo", TType::Set, 2); assert_success!(o_prot.write_field_begin(&field_ident_2)); assert_success!(o_prot.write_field_end()); // since this delta > 0 and < 15 it can be encoded as a delta let field_ident_3 = TFieldIdentifier::new("foo", TType::String, 6); assert_success!(o_prot.write_field_begin(&field_ident_3)); assert_success!(o_prot.write_field_end()); // now, finish the struct off assert_success!(o_prot.write_field_stop()); assert_success!(o_prot.write_struct_end()); copy_write_buffer_to_read_buffer!(o_prot); // read the struct back assert_success!(i_prot.read_struct_begin()); let 
read_ident_1 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_1, TFieldIdentifier { name: None, ..field_ident_1 } ); assert_success!(i_prot.read_field_end()); let read_ident_2 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_2, TFieldIdentifier { name: None, ..field_ident_2 } ); assert_success!(i_prot.read_field_end()); let read_ident_3 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_3, TFieldIdentifier { name: None, ..field_ident_3 } ); assert_success!(i_prot.read_field_end()); let read_ident_4 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_4, TFieldIdentifier { name: None, field_type: TType::Stop, id: None, } ); assert_success!(i_prot.read_struct_end()); } #[test] fn must_write_struct_with_long_fields() { let (_, mut o_prot) = test_objects(); // no bytes should be written however assert_success!(o_prot.write_struct_begin(&TStructIdentifier::new("foo"))); // write three fields with field ids that cannot be encoded as deltas // since this is the first field (and it's zero) it gets the full varint write assert_success!(o_prot.write_field_begin(&TFieldIdentifier::new("foo", TType::I32, 0))); assert_success!(o_prot.write_field_end()); // since this delta is > 15 it is encoded as a zig-zag varint assert_success!(o_prot.write_field_begin(&TFieldIdentifier::new("foo", TType::I64, 16))); assert_success!(o_prot.write_field_end()); // since this delta is > 15 it is encoded as a zig-zag varint assert_success!(o_prot.write_field_begin(&TFieldIdentifier::new("foo", TType::Set, 99))); assert_success!(o_prot.write_field_end()); // now, finish the struct off assert_success!(o_prot.write_field_stop()); assert_success!(o_prot.write_struct_end()); #[rustfmt::skip] let expected: [u8; 8] = [ 0x05, /* field type */ 0x00, /* first field id */ 0x06, /* field type */ 0x20, /* zig-zag varint field id */ 0x0A, /* field type */ 0xC6, 0x01, /* zig-zag varint field id */ 0x00 /* field stop */, ]; assert_eq_written_bytes!(o_prot, expected); } #[test] fn must_round_trip_struct_with_long_fields() { let (mut i_prot, mut o_prot) = test_objects(); // no bytes should be written however assert_success!(o_prot.write_struct_begin(&TStructIdentifier::new("foo"))); // write three fields with field ids that cannot be encoded as deltas // since this is the first field (and it's zero) it gets the full varint write let field_ident_1 = TFieldIdentifier::new("foo", TType::I32, 0); assert_success!(o_prot.write_field_begin(&field_ident_1)); assert_success!(o_prot.write_field_end()); // since this delta is > 15 it is encoded as a zig-zag varint let field_ident_2 = TFieldIdentifier::new("foo", TType::I64, 16); assert_success!(o_prot.write_field_begin(&field_ident_2)); assert_success!(o_prot.write_field_end()); // since this delta is > 15 it is encoded as a zig-zag varint let field_ident_3 = TFieldIdentifier::new("foo", TType::Set, 99); assert_success!(o_prot.write_field_begin(&field_ident_3)); assert_success!(o_prot.write_field_end()); // now, finish the struct off assert_success!(o_prot.write_field_stop()); assert_success!(o_prot.write_struct_end()); copy_write_buffer_to_read_buffer!(o_prot); // read the struct back assert_success!(i_prot.read_struct_begin()); let read_ident_1 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_1, TFieldIdentifier { name: None, ..field_ident_1 } ); assert_success!(i_prot.read_field_end()); let read_ident_2 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_2, TFieldIdentifier { name: None, 
..field_ident_2 } ); assert_success!(i_prot.read_field_end()); let read_ident_3 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_3, TFieldIdentifier { name: None, ..field_ident_3 } ); assert_success!(i_prot.read_field_end()); let read_ident_4 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_4, TFieldIdentifier { name: None, field_type: TType::Stop, id: None, } ); assert_success!(i_prot.read_struct_end()); } #[test] fn must_write_struct_with_mix_of_long_and_delta_fields() { let (_, mut o_prot) = test_objects(); // no bytes should be written however assert_success!(o_prot.write_struct_begin(&TStructIdentifier::new("foo"))); // write three fields with field ids that cannot be encoded as deltas // since the delta is > 0 and < 15 it gets a delta write assert_success!(o_prot.write_field_begin(&TFieldIdentifier::new("foo", TType::I64, 1))); assert_success!(o_prot.write_field_end()); // since this delta > 0 and < 15 it gets a delta write assert_success!(o_prot.write_field_begin(&TFieldIdentifier::new("foo", TType::I32, 9))); assert_success!(o_prot.write_field_end()); // since this delta is > 15 it is encoded as a zig-zag varint assert_success!(o_prot.write_field_begin(&TFieldIdentifier::new("foo", TType::Set, 1000))); assert_success!(o_prot.write_field_end()); // since this delta is > 15 it is encoded as a zig-zag varint assert_success!(o_prot.write_field_begin(&TFieldIdentifier::new("foo", TType::Set, 2001))); assert_success!(o_prot.write_field_end()); // since this is only 3 up from the previous it is recorded as a delta assert_success!(o_prot.write_field_begin(&TFieldIdentifier::new("foo", TType::Set, 2004))); assert_success!(o_prot.write_field_end()); // now, finish the struct off assert_success!(o_prot.write_field_stop()); assert_success!(o_prot.write_struct_end()); #[rustfmt::skip] let expected: [u8; 10] = [ 0x16, /* field delta (1) | field type */ 0x85, /* field delta (8) | field type */ 0x0A, /* field type */ 0xD0, 0x0F, /* zig-zag varint field id */ 0x0A, /* field type */ 0xA2, 0x1F, /* zig-zag varint field id */ 0x3A, /* field delta (3) | field type */ 0x00 /* field stop */, ]; assert_eq_written_bytes!(o_prot, expected); } #[allow(clippy::cognitive_complexity)] #[test] fn must_round_trip_struct_with_mix_of_long_and_delta_fields() { let (mut i_prot, mut o_prot) = test_objects(); // no bytes should be written however let struct_ident = TStructIdentifier::new("foo"); assert_success!(o_prot.write_struct_begin(&struct_ident)); // write three fields with field ids that cannot be encoded as deltas // since the delta is > 0 and < 15 it gets a delta write let field_ident_1 = TFieldIdentifier::new("foo", TType::I64, 1); assert_success!(o_prot.write_field_begin(&field_ident_1)); assert_success!(o_prot.write_field_end()); // since this delta > 0 and < 15 it gets a delta write let field_ident_2 = TFieldIdentifier::new("foo", TType::I32, 9); assert_success!(o_prot.write_field_begin(&field_ident_2)); assert_success!(o_prot.write_field_end()); // since this delta is > 15 it is encoded as a zig-zag varint let field_ident_3 = TFieldIdentifier::new("foo", TType::Set, 1000); assert_success!(o_prot.write_field_begin(&field_ident_3)); assert_success!(o_prot.write_field_end()); // since this delta is > 15 it is encoded as a zig-zag varint let field_ident_4 = TFieldIdentifier::new("foo", TType::Set, 2001); assert_success!(o_prot.write_field_begin(&field_ident_4)); assert_success!(o_prot.write_field_end()); // since this is only 3 up from the previous it is recorded as a 
delta let field_ident_5 = TFieldIdentifier::new("foo", TType::Set, 2004); assert_success!(o_prot.write_field_begin(&field_ident_5)); assert_success!(o_prot.write_field_end()); // now, finish the struct off assert_success!(o_prot.write_field_stop()); assert_success!(o_prot.write_struct_end()); copy_write_buffer_to_read_buffer!(o_prot); // read the struct back assert_success!(i_prot.read_struct_begin()); let read_ident_1 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_1, TFieldIdentifier { name: None, ..field_ident_1 } ); assert_success!(i_prot.read_field_end()); let read_ident_2 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_2, TFieldIdentifier { name: None, ..field_ident_2 } ); assert_success!(i_prot.read_field_end()); let read_ident_3 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_3, TFieldIdentifier { name: None, ..field_ident_3 } ); assert_success!(i_prot.read_field_end()); let read_ident_4 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_4, TFieldIdentifier { name: None, ..field_ident_4 } ); assert_success!(i_prot.read_field_end()); let read_ident_5 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_5, TFieldIdentifier { name: None, ..field_ident_5 } ); assert_success!(i_prot.read_field_end()); let read_ident_6 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_6, TFieldIdentifier { name: None, field_type: TType::Stop, id: None, } ); assert_success!(i_prot.read_struct_end()); } #[test] fn must_write_nested_structs_0() { // last field of the containing struct is a delta // first field of the the contained struct is a delta let (_, mut o_prot) = test_objects(); // start containing struct assert_success!(o_prot.write_struct_begin(&TStructIdentifier::new("foo"))); // containing struct // since the delta is > 0 and < 15 it gets a delta write assert_success!(o_prot.write_field_begin(&TFieldIdentifier::new("foo", TType::I64, 1))); assert_success!(o_prot.write_field_end()); // containing struct // since this delta > 0 and < 15 it gets a delta write assert_success!(o_prot.write_field_begin(&TFieldIdentifier::new("foo", TType::I32, 9))); assert_success!(o_prot.write_field_end()); // start contained struct assert_success!(o_prot.write_struct_begin(&TStructIdentifier::new("foo"))); // contained struct // since the delta is > 0 and < 15 it gets a delta write assert_success!(o_prot.write_field_begin(&TFieldIdentifier::new("foo", TType::I08, 7))); assert_success!(o_prot.write_field_end()); // contained struct // since this delta > 15 it gets a full write assert_success!(o_prot.write_field_begin(&TFieldIdentifier::new("foo", TType::Double, 24))); assert_success!(o_prot.write_field_end()); // end contained struct assert_success!(o_prot.write_field_stop()); assert_success!(o_prot.write_struct_end()); // end containing struct assert_success!(o_prot.write_field_stop()); assert_success!(o_prot.write_struct_end()); #[rustfmt::skip] let expected: [u8; 7] = [ 0x16, /* field delta (1) | field type */ 0x85, /* field delta (8) | field type */ 0x73, /* field delta (7) | field type */ 0x07, /* field type */ 0x30, /* zig-zag varint field id */ 0x00, /* field stop - contained */ 0x00 /* field stop - containing */, ]; assert_eq_written_bytes!(o_prot, expected); } #[allow(clippy::cognitive_complexity)] #[test] fn must_round_trip_nested_structs_0() { // last field of the containing struct is a delta // first field of the the contained struct is a delta let (mut i_prot, mut o_prot) = test_objects(); 
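// Editorial note on the round trip below: `write_struct_begin` pushes the
// current `last_write_field_id` and resets it to 0 (and `read_struct_begin`
// mirrors this on the read side), so the contained struct's field-id deltas
// are computed independently of the containing struct's ids; popping the
// stack in `*_struct_end` then restores the outer delta base.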
// start containing struct assert_success!(o_prot.write_struct_begin(&TStructIdentifier::new("foo"))); // containing struct // since the delta is > 0 and < 15 it gets a delta write let field_ident_1 = TFieldIdentifier::new("foo", TType::I64, 1); assert_success!(o_prot.write_field_begin(&field_ident_1)); assert_success!(o_prot.write_field_end()); // containing struct // since this delta > 0 and < 15 it gets a delta write let field_ident_2 = TFieldIdentifier::new("foo", TType::I32, 9); assert_success!(o_prot.write_field_begin(&field_ident_2)); assert_success!(o_prot.write_field_end()); // start contained struct assert_success!(o_prot.write_struct_begin(&TStructIdentifier::new("foo"))); // contained struct // since the delta is > 0 and < 15 it gets a delta write let field_ident_3 = TFieldIdentifier::new("foo", TType::I08, 7); assert_success!(o_prot.write_field_begin(&field_ident_3)); assert_success!(o_prot.write_field_end()); // contained struct // since this delta > 15 it gets a full write let field_ident_4 = TFieldIdentifier::new("foo", TType::Double, 24); assert_success!(o_prot.write_field_begin(&field_ident_4)); assert_success!(o_prot.write_field_end()); // end contained struct assert_success!(o_prot.write_field_stop()); assert_success!(o_prot.write_struct_end()); // end containing struct assert_success!(o_prot.write_field_stop()); assert_success!(o_prot.write_struct_end()); copy_write_buffer_to_read_buffer!(o_prot); // read containing struct back assert_success!(i_prot.read_struct_begin()); let read_ident_1 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_1, TFieldIdentifier { name: None, ..field_ident_1 } ); assert_success!(i_prot.read_field_end()); let read_ident_2 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_2, TFieldIdentifier { name: None, ..field_ident_2 } ); assert_success!(i_prot.read_field_end()); // read contained struct back assert_success!(i_prot.read_struct_begin()); let read_ident_3 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_3, TFieldIdentifier { name: None, ..field_ident_3 } ); assert_success!(i_prot.read_field_end()); let read_ident_4 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_4, TFieldIdentifier { name: None, ..field_ident_4 } ); assert_success!(i_prot.read_field_end()); // end contained struct let read_ident_6 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_6, TFieldIdentifier { name: None, field_type: TType::Stop, id: None, } ); assert_success!(i_prot.read_struct_end()); // end containing struct let read_ident_7 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_7, TFieldIdentifier { name: None, field_type: TType::Stop, id: None, } ); assert_success!(i_prot.read_struct_end()); } #[test] fn must_write_nested_structs_1() { // last field of the containing struct is a delta // first field of the the contained struct is a full write let (_, mut o_prot) = test_objects(); // start containing struct assert_success!(o_prot.write_struct_begin(&TStructIdentifier::new("foo"))); // containing struct // since the delta is > 0 and < 15 it gets a delta write assert_success!(o_prot.write_field_begin(&TFieldIdentifier::new("foo", TType::I64, 1))); assert_success!(o_prot.write_field_end()); // containing struct // since this delta > 0 and < 15 it gets a delta write assert_success!(o_prot.write_field_begin(&TFieldIdentifier::new("foo", TType::I32, 9))); assert_success!(o_prot.write_field_end()); // start contained struct 
assert_success!(o_prot.write_struct_begin(&TStructIdentifier::new("foo"))); // contained struct // since this delta > 15 it gets a full write assert_success!(o_prot.write_field_begin(&TFieldIdentifier::new("foo", TType::Double, 24))); assert_success!(o_prot.write_field_end()); // contained struct // since the delta is > 0 and < 15 it gets a delta write assert_success!(o_prot.write_field_begin(&TFieldIdentifier::new("foo", TType::I08, 27))); assert_success!(o_prot.write_field_end()); // end contained struct assert_success!(o_prot.write_field_stop()); assert_success!(o_prot.write_struct_end()); // end containing struct assert_success!(o_prot.write_field_stop()); assert_success!(o_prot.write_struct_end()); #[rustfmt::skip] let expected: [u8; 7] = [ 0x16, /* field delta (1) | field type */ 0x85, /* field delta (8) | field type */ 0x07, /* field type */ 0x30, /* zig-zag varint field id */ 0x33, /* field delta (3) | field type */ 0x00, /* field stop - contained */ 0x00 /* field stop - containing */, ]; assert_eq_written_bytes!(o_prot, expected); } #[allow(clippy::cognitive_complexity)] #[test] fn must_round_trip_nested_structs_1() { // last field of the containing struct is a delta // first field of the the contained struct is a full write let (mut i_prot, mut o_prot) = test_objects(); // start containing struct assert_success!(o_prot.write_struct_begin(&TStructIdentifier::new("foo"))); // containing struct // since the delta is > 0 and < 15 it gets a delta write let field_ident_1 = TFieldIdentifier::new("foo", TType::I64, 1); assert_success!(o_prot.write_field_begin(&field_ident_1)); assert_success!(o_prot.write_field_end()); // containing struct // since this delta > 0 and < 15 it gets a delta write let field_ident_2 = TFieldIdentifier::new("foo", TType::I32, 9); assert_success!(o_prot.write_field_begin(&field_ident_2)); assert_success!(o_prot.write_field_end()); // start contained struct assert_success!(o_prot.write_struct_begin(&TStructIdentifier::new("foo"))); // contained struct // since this delta > 15 it gets a full write let field_ident_3 = TFieldIdentifier::new("foo", TType::Double, 24); assert_success!(o_prot.write_field_begin(&field_ident_3)); assert_success!(o_prot.write_field_end()); // contained struct // since the delta is > 0 and < 15 it gets a delta write let field_ident_4 = TFieldIdentifier::new("foo", TType::I08, 27); assert_success!(o_prot.write_field_begin(&field_ident_4)); assert_success!(o_prot.write_field_end()); // end contained struct assert_success!(o_prot.write_field_stop()); assert_success!(o_prot.write_struct_end()); // end containing struct assert_success!(o_prot.write_field_stop()); assert_success!(o_prot.write_struct_end()); copy_write_buffer_to_read_buffer!(o_prot); // read containing struct back assert_success!(i_prot.read_struct_begin()); let read_ident_1 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_1, TFieldIdentifier { name: None, ..field_ident_1 } ); assert_success!(i_prot.read_field_end()); let read_ident_2 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_2, TFieldIdentifier { name: None, ..field_ident_2 } ); assert_success!(i_prot.read_field_end()); // read contained struct back assert_success!(i_prot.read_struct_begin()); let read_ident_3 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_3, TFieldIdentifier { name: None, ..field_ident_3 } ); assert_success!(i_prot.read_field_end()); let read_ident_4 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_4, TFieldIdentifier { 
name: None, ..field_ident_4 } ); assert_success!(i_prot.read_field_end()); // end contained struct let read_ident_6 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_6, TFieldIdentifier { name: None, field_type: TType::Stop, id: None, } ); assert_success!(i_prot.read_struct_end()); // end containing struct let read_ident_7 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_7, TFieldIdentifier { name: None, field_type: TType::Stop, id: None, } ); assert_success!(i_prot.read_struct_end()); } #[test] fn must_write_nested_structs_2() { // last field of the containing struct is a full write // first field of the the contained struct is a delta write let (_, mut o_prot) = test_objects(); // start containing struct assert_success!(o_prot.write_struct_begin(&TStructIdentifier::new("foo"))); // containing struct // since the delta is > 0 and < 15 it gets a delta write assert_success!(o_prot.write_field_begin(&TFieldIdentifier::new("foo", TType::I64, 1))); assert_success!(o_prot.write_field_end()); // containing struct // since this delta > 15 it gets a full write assert_success!(o_prot.write_field_begin(&TFieldIdentifier::new("foo", TType::String, 21))); assert_success!(o_prot.write_field_end()); // start contained struct assert_success!(o_prot.write_struct_begin(&TStructIdentifier::new("foo"))); // contained struct // since this delta > 0 and < 15 it gets a delta write assert_success!(o_prot.write_field_begin(&TFieldIdentifier::new("foo", TType::Double, 7))); assert_success!(o_prot.write_field_end()); // contained struct // since the delta is > 0 and < 15 it gets a delta write assert_success!(o_prot.write_field_begin(&TFieldIdentifier::new("foo", TType::I08, 10))); assert_success!(o_prot.write_field_end()); // end contained struct assert_success!(o_prot.write_field_stop()); assert_success!(o_prot.write_struct_end()); // end containing struct assert_success!(o_prot.write_field_stop()); assert_success!(o_prot.write_struct_end()); #[rustfmt::skip] let expected: [u8; 7] = [ 0x16, /* field delta (1) | field type */ 0x08, /* field type */ 0x2A, /* zig-zag varint field id */ 0x77, /* field delta(7) | field type */ 0x33, /* field delta (3) | field type */ 0x00, /* field stop - contained */ 0x00 /* field stop - containing */, ]; assert_eq_written_bytes!(o_prot, expected); } #[allow(clippy::cognitive_complexity)] #[test] fn must_round_trip_nested_structs_2() { let (mut i_prot, mut o_prot) = test_objects(); // start containing struct assert_success!(o_prot.write_struct_begin(&TStructIdentifier::new("foo"))); // containing struct // since the delta is > 0 and < 15 it gets a delta write let field_ident_1 = TFieldIdentifier::new("foo", TType::I64, 1); assert_success!(o_prot.write_field_begin(&field_ident_1)); assert_success!(o_prot.write_field_end()); // containing struct // since this delta > 15 it gets a full write let field_ident_2 = TFieldIdentifier::new("foo", TType::String, 21); assert_success!(o_prot.write_field_begin(&field_ident_2)); assert_success!(o_prot.write_field_end()); // start contained struct assert_success!(o_prot.write_struct_begin(&TStructIdentifier::new("foo"))); // contained struct // since this delta > 0 and < 15 it gets a delta write let field_ident_3 = TFieldIdentifier::new("foo", TType::Double, 7); assert_success!(o_prot.write_field_begin(&field_ident_3)); assert_success!(o_prot.write_field_end()); // contained struct // since the delta is > 0 and < 15 it gets a delta write let field_ident_4 = TFieldIdentifier::new("foo", TType::I08, 10); 
assert_success!(o_prot.write_field_begin(&field_ident_4)); assert_success!(o_prot.write_field_end()); // end contained struct assert_success!(o_prot.write_field_stop()); assert_success!(o_prot.write_struct_end()); // end containing struct assert_success!(o_prot.write_field_stop()); assert_success!(o_prot.write_struct_end()); copy_write_buffer_to_read_buffer!(o_prot); // read containing struct back assert_success!(i_prot.read_struct_begin()); let read_ident_1 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_1, TFieldIdentifier { name: None, ..field_ident_1 } ); assert_success!(i_prot.read_field_end()); let read_ident_2 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_2, TFieldIdentifier { name: None, ..field_ident_2 } ); assert_success!(i_prot.read_field_end()); // read contained struct back assert_success!(i_prot.read_struct_begin()); let read_ident_3 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_3, TFieldIdentifier { name: None, ..field_ident_3 } ); assert_success!(i_prot.read_field_end()); let read_ident_4 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_4, TFieldIdentifier { name: None, ..field_ident_4 } ); assert_success!(i_prot.read_field_end()); // end contained struct let read_ident_6 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_6, TFieldIdentifier { name: None, field_type: TType::Stop, id: None, } ); assert_success!(i_prot.read_struct_end()); // end containing struct let read_ident_7 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_7, TFieldIdentifier { name: None, field_type: TType::Stop, id: None, } ); assert_success!(i_prot.read_struct_end()); } #[test] fn must_write_nested_structs_3() { // last field of the containing struct is a full write // first field of the the contained struct is a full write let (_, mut o_prot) = test_objects(); // start containing struct assert_success!(o_prot.write_struct_begin(&TStructIdentifier::new("foo"))); // containing struct // since the delta is > 0 and < 15 it gets a delta write assert_success!(o_prot.write_field_begin(&TFieldIdentifier::new("foo", TType::I64, 1))); assert_success!(o_prot.write_field_end()); // containing struct // since this delta > 15 it gets a full write assert_success!(o_prot.write_field_begin(&TFieldIdentifier::new("foo", TType::String, 21))); assert_success!(o_prot.write_field_end()); // start contained struct assert_success!(o_prot.write_struct_begin(&TStructIdentifier::new("foo"))); // contained struct // since this delta > 15 it gets a full write assert_success!(o_prot.write_field_begin(&TFieldIdentifier::new("foo", TType::Double, 21))); assert_success!(o_prot.write_field_end()); // contained struct // since the delta is > 0 and < 15 it gets a delta write assert_success!(o_prot.write_field_begin(&TFieldIdentifier::new("foo", TType::I08, 27))); assert_success!(o_prot.write_field_end()); // end contained struct assert_success!(o_prot.write_field_stop()); assert_success!(o_prot.write_struct_end()); // end containing struct assert_success!(o_prot.write_field_stop()); assert_success!(o_prot.write_struct_end()); #[rustfmt::skip] let expected: [u8; 8] = [ 0x16, /* field delta (1) | field type */ 0x08, /* field type */ 0x2A, /* zig-zag varint field id */ 0x07, /* field type */ 0x2A, /* zig-zag varint field id */ 0x63, /* field delta (6) | field type */ 0x00, /* field stop - contained */ 0x00 /* field stop - containing */, ]; assert_eq_written_bytes!(o_prot, expected); } 
#[allow(clippy::cognitive_complexity)] #[test] fn must_round_trip_nested_structs_3() { // last field of the containing struct is a full write // first field of the the contained struct is a full write let (mut i_prot, mut o_prot) = test_objects(); // start containing struct assert_success!(o_prot.write_struct_begin(&TStructIdentifier::new("foo"))); // containing struct // since the delta is > 0 and < 15 it gets a delta write let field_ident_1 = TFieldIdentifier::new("foo", TType::I64, 1); assert_success!(o_prot.write_field_begin(&field_ident_1)); assert_success!(o_prot.write_field_end()); // containing struct // since this delta > 15 it gets a full write let field_ident_2 = TFieldIdentifier::new("foo", TType::String, 21); assert_success!(o_prot.write_field_begin(&field_ident_2)); assert_success!(o_prot.write_field_end()); // start contained struct assert_success!(o_prot.write_struct_begin(&TStructIdentifier::new("foo"))); // contained struct // since this delta > 15 it gets a full write let field_ident_3 = TFieldIdentifier::new("foo", TType::Double, 21); assert_success!(o_prot.write_field_begin(&field_ident_3)); assert_success!(o_prot.write_field_end()); // contained struct // since the delta is > 0 and < 15 it gets a delta write let field_ident_4 = TFieldIdentifier::new("foo", TType::I08, 27); assert_success!(o_prot.write_field_begin(&field_ident_4)); assert_success!(o_prot.write_field_end()); // end contained struct assert_success!(o_prot.write_field_stop()); assert_success!(o_prot.write_struct_end()); // end containing struct assert_success!(o_prot.write_field_stop()); assert_success!(o_prot.write_struct_end()); copy_write_buffer_to_read_buffer!(o_prot); // read containing struct back assert_success!(i_prot.read_struct_begin()); let read_ident_1 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_1, TFieldIdentifier { name: None, ..field_ident_1 } ); assert_success!(i_prot.read_field_end()); let read_ident_2 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_2, TFieldIdentifier { name: None, ..field_ident_2 } ); assert_success!(i_prot.read_field_end()); // read contained struct back assert_success!(i_prot.read_struct_begin()); let read_ident_3 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_3, TFieldIdentifier { name: None, ..field_ident_3 } ); assert_success!(i_prot.read_field_end()); let read_ident_4 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_4, TFieldIdentifier { name: None, ..field_ident_4 } ); assert_success!(i_prot.read_field_end()); // end contained struct let read_ident_6 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_6, TFieldIdentifier { name: None, field_type: TType::Stop, id: None, } ); assert_success!(i_prot.read_struct_end()); // end containing struct let read_ident_7 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_7, TFieldIdentifier { name: None, field_type: TType::Stop, id: None, } ); assert_success!(i_prot.read_struct_end()); } #[test] fn must_write_bool_field() { let (_, mut o_prot) = test_objects(); // no bytes should be written however assert_success!(o_prot.write_struct_begin(&TStructIdentifier::new("foo"))); // write three fields with field ids that cannot be encoded as deltas // since the delta is > 0 and < 16 it gets a delta write assert_success!(o_prot.write_field_begin(&TFieldIdentifier::new("foo", TType::Bool, 1))); assert_success!(o_prot.write_bool(true)); assert_success!(o_prot.write_field_end()); // since this delta > 0 and < 15 it 
gets a delta write assert_success!(o_prot.write_field_begin(&TFieldIdentifier::new("foo", TType::Bool, 9))); assert_success!(o_prot.write_bool(false)); assert_success!(o_prot.write_field_end()); // since this delta > 15 it gets a full write assert_success!(o_prot.write_field_begin(&TFieldIdentifier::new("foo", TType::Bool, 26))); assert_success!(o_prot.write_bool(true)); assert_success!(o_prot.write_field_end()); // since this delta > 15 it gets a full write assert_success!(o_prot.write_field_begin(&TFieldIdentifier::new("foo", TType::Bool, 45))); assert_success!(o_prot.write_bool(false)); assert_success!(o_prot.write_field_end()); // now, finish the struct off assert_success!(o_prot.write_field_stop()); assert_success!(o_prot.write_struct_end()); #[rustfmt::skip] let expected: [u8; 7] = [ 0x11, /* field delta (1) | true */ 0x82, /* field delta (8) | false */ 0x01, /* true */ 0x34, /* field id */ 0x02, /* false */ 0x5A, /* field id */ 0x00 /* stop field */, ]; assert_eq_written_bytes!(o_prot, expected); } #[allow(clippy::cognitive_complexity)] #[test] fn must_round_trip_bool_field() { let (mut i_prot, mut o_prot) = test_objects(); // no bytes should be written however let struct_ident = TStructIdentifier::new("foo"); assert_success!(o_prot.write_struct_begin(&struct_ident)); // write two fields // since the delta is > 0 and < 16 it gets a delta write let field_ident_1 = TFieldIdentifier::new("foo", TType::Bool, 1); assert_success!(o_prot.write_field_begin(&field_ident_1)); assert_success!(o_prot.write_bool(true)); assert_success!(o_prot.write_field_end()); // since this delta > 0 and < 15 it gets a delta write let field_ident_2 = TFieldIdentifier::new("foo", TType::Bool, 9); assert_success!(o_prot.write_field_begin(&field_ident_2)); assert_success!(o_prot.write_bool(false)); assert_success!(o_prot.write_field_end()); // since this delta > 15 it gets a full write let field_ident_3 = TFieldIdentifier::new("foo", TType::Bool, 26); assert_success!(o_prot.write_field_begin(&field_ident_3)); assert_success!(o_prot.write_bool(true)); assert_success!(o_prot.write_field_end()); // since this delta > 15 it gets a full write let field_ident_4 = TFieldIdentifier::new("foo", TType::Bool, 45); assert_success!(o_prot.write_field_begin(&field_ident_4)); assert_success!(o_prot.write_bool(false)); assert_success!(o_prot.write_field_end()); // now, finish the struct off assert_success!(o_prot.write_field_stop()); assert_success!(o_prot.write_struct_end()); copy_write_buffer_to_read_buffer!(o_prot); // read the struct back assert_success!(i_prot.read_struct_begin()); let read_ident_1 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_1, TFieldIdentifier { name: None, ..field_ident_1 } ); let read_value_1 = assert_success!(i_prot.read_bool()); assert_eq!(read_value_1, true); assert_success!(i_prot.read_field_end()); let read_ident_2 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_2, TFieldIdentifier { name: None, ..field_ident_2 } ); let read_value_2 = assert_success!(i_prot.read_bool()); assert_eq!(read_value_2, false); assert_success!(i_prot.read_field_end()); let read_ident_3 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_3, TFieldIdentifier { name: None, ..field_ident_3 } ); let read_value_3 = assert_success!(i_prot.read_bool()); assert_eq!(read_value_3, true); assert_success!(i_prot.read_field_end()); let read_ident_4 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_4, TFieldIdentifier { name: None, ..field_ident_4 } ); let 
read_value_4 = assert_success!(i_prot.read_bool()); assert_eq!(read_value_4, false); assert_success!(i_prot.read_field_end()); let read_ident_5 = assert_success!(i_prot.read_field_begin()); assert_eq!( read_ident_5, TFieldIdentifier { name: None, field_type: TType::Stop, id: None, } ); assert_success!(i_prot.read_struct_end()); } #[test] #[should_panic] fn must_fail_if_write_field_end_without_writing_bool_value() { let (_, mut o_prot) = test_objects(); assert_success!(o_prot.write_struct_begin(&TStructIdentifier::new("foo"))); assert_success!(o_prot.write_field_begin(&TFieldIdentifier::new("foo", TType::Bool, 1))); o_prot.write_field_end().unwrap(); } #[test] #[should_panic] fn must_fail_if_write_stop_field_without_writing_bool_value() { let (_, mut o_prot) = test_objects(); assert_success!(o_prot.write_struct_begin(&TStructIdentifier::new("foo"))); assert_success!(o_prot.write_field_begin(&TFieldIdentifier::new("foo", TType::Bool, 1))); o_prot.write_field_stop().unwrap(); } #[test] #[should_panic] fn must_fail_if_write_struct_end_without_writing_bool_value() { let (_, mut o_prot) = test_objects(); assert_success!(o_prot.write_struct_begin(&TStructIdentifier::new("foo"))); assert_success!(o_prot.write_field_begin(&TFieldIdentifier::new("foo", TType::Bool, 1))); o_prot.write_struct_end().unwrap(); } #[test] #[should_panic] fn must_fail_if_write_struct_end_without_any_fields() { let (_, mut o_prot) = test_objects(); o_prot.write_struct_end().unwrap(); } #[test] fn must_write_field_end() { assert_no_write(|o| o.write_field_end()); } #[test] fn must_write_small_sized_list_begin() { let (_, mut o_prot) = test_objects(); assert_success!(o_prot.write_list_begin(&TListIdentifier::new(TType::I64, 4))); let expected: [u8; 1] = [0x46 /* size | elem_type */]; assert_eq_written_bytes!(o_prot, expected); } #[test] fn must_round_trip_small_sized_list_begin() { let (mut i_prot, mut o_prot) = test_objects(); let ident = TListIdentifier::new(TType::I08, 10); assert_success!(o_prot.write_list_begin(&ident)); copy_write_buffer_to_read_buffer!(o_prot); let res = assert_success!(i_prot.read_list_begin()); assert_eq!(&res, &ident); } #[test] fn must_write_large_sized_list_begin() { let (_, mut o_prot) = test_objects(); let res = o_prot.write_list_begin(&TListIdentifier::new(TType::List, 9999)); assert!(res.is_ok()); let expected: [u8; 3] = [ 0xF9, /* 0xF0 | elem_type */ 0x8F, 0x4E, /* size as varint */ ]; assert_eq_written_bytes!(o_prot, expected); } #[test] fn must_round_trip_large_sized_list_begin() { let (mut i_prot, mut o_prot) = test_objects(); let ident = TListIdentifier::new(TType::Set, 47381); assert_success!(o_prot.write_list_begin(&ident)); copy_write_buffer_to_read_buffer!(o_prot); let res = assert_success!(i_prot.read_list_begin()); assert_eq!(&res, &ident); } #[test] fn must_write_list_end() { assert_no_write(|o| o.write_list_end()); } #[test] fn must_write_small_sized_set_begin() { let (_, mut o_prot) = test_objects(); assert_success!(o_prot.write_set_begin(&TSetIdentifier::new(TType::Struct, 2))); let expected: [u8; 1] = [0x2C /* size | elem_type */]; assert_eq_written_bytes!(o_prot, expected); } #[test] fn must_round_trip_small_sized_set_begin() { let (mut i_prot, mut o_prot) = test_objects(); let ident = TSetIdentifier::new(TType::I16, 7); assert_success!(o_prot.write_set_begin(&ident)); copy_write_buffer_to_read_buffer!(o_prot); let res = assert_success!(i_prot.read_set_begin()); assert_eq!(&res, &ident); } #[test] fn must_write_large_sized_set_begin() { let (_, mut o_prot) = test_objects(); 
    assert_success!(o_prot.write_set_begin(&TSetIdentifier::new(TType::Double, 23891)));

    let expected: [u8; 4] = [
        0xF7, /* 0xF0 | elem_type */
        0xD3, 0xBA, 0x01, /* size as varint */
    ];

    assert_eq_written_bytes!(o_prot, expected);
}

#[test]
fn must_round_trip_large_sized_set_begin() {
    let (mut i_prot, mut o_prot) = test_objects();

    let ident = TSetIdentifier::new(TType::Map, 3_928_429);
    assert_success!(o_prot.write_set_begin(&ident));

    copy_write_buffer_to_read_buffer!(o_prot);

    let res = assert_success!(i_prot.read_set_begin());
    assert_eq!(&res, &ident);
}

#[test]
fn must_write_set_end() {
    assert_no_write(|o| o.write_set_end());
}

#[test]
fn must_write_zero_sized_map_begin() {
    let (_, mut o_prot) = test_objects();

    assert_success!(o_prot.write_map_begin(&TMapIdentifier::new(TType::String, TType::I32, 0)));

    let expected: [u8; 1] = [0x00]; // since size is zero we don't write anything

    assert_eq_written_bytes!(o_prot, expected);
}

#[test]
fn must_read_zero_sized_map_begin() {
    let (mut i_prot, mut o_prot) = test_objects();

    assert_success!(o_prot.write_map_begin(&TMapIdentifier::new(TType::Double, TType::I32, 0)));

    copy_write_buffer_to_read_buffer!(o_prot);

    let res = assert_success!(i_prot.read_map_begin());
    assert_eq!(
        &res,
        &TMapIdentifier {
            key_type: None,
            value_type: None,
            size: 0,
        }
    );
}

#[test]
fn must_write_map_begin() {
    let (_, mut o_prot) = test_objects();

    assert_success!(o_prot.write_map_begin(&TMapIdentifier::new(
        TType::Double,
        TType::String,
        238
    )));

    let expected: [u8; 3] = [
        0xEE, 0x01, /* size as varint */
        0x78, /* key type | val type */
    ];

    assert_eq_written_bytes!(o_prot, expected);
}

#[test]
fn must_round_trip_map_begin() {
    let (mut i_prot, mut o_prot) = test_objects();

    let ident = TMapIdentifier::new(TType::Map, TType::List, 1_928_349);
    assert_success!(o_prot.write_map_begin(&ident));

    copy_write_buffer_to_read_buffer!(o_prot);

    let res = assert_success!(i_prot.read_map_begin());
    assert_eq!(&res, &ident);
}

#[test]
fn must_write_map_end() {
    assert_no_write(|o| o.write_map_end());
}

#[test]
fn must_write_map_with_bool_key_and_value() {
    let (_, mut o_prot) = test_objects();

    assert_success!(o_prot.write_map_begin(&TMapIdentifier::new(TType::Bool, TType::Bool, 1)));
    assert_success!(o_prot.write_bool(true));
    assert_success!(o_prot.write_bool(false));
    assert_success!(o_prot.write_map_end());

    let expected: [u8; 4] = [
        0x01, /* size as varint */
        0x11, /* key type | val type */
        0x01, /* key: true */
        0x02, /* val: false */
    ];

    assert_eq_written_bytes!(o_prot, expected);
}

#[test]
fn must_round_trip_map_with_bool_value() {
    let (mut i_prot, mut o_prot) = test_objects();

    let map_ident = TMapIdentifier::new(TType::Bool, TType::Bool, 2);
    assert_success!(o_prot.write_map_begin(&map_ident));
    assert_success!(o_prot.write_bool(true));
    assert_success!(o_prot.write_bool(false));
    assert_success!(o_prot.write_bool(false));
    assert_success!(o_prot.write_bool(true));
    assert_success!(o_prot.write_map_end());

    copy_write_buffer_to_read_buffer!(o_prot);

    // map header
    let rcvd_ident = assert_success!(i_prot.read_map_begin());
    assert_eq!(&rcvd_ident, &map_ident);
    // key 1
    let b = assert_success!(i_prot.read_bool());
    assert_eq!(b, true);
    // val 1
    let b = assert_success!(i_prot.read_bool());
    assert_eq!(b, false);
    // key 2
    let b = assert_success!(i_prot.read_bool());
    assert_eq!(b, false);
    // val 2
    let b = assert_success!(i_prot.read_bool());
    assert_eq!(b, true);
    // map end
    assert_success!(i_prot.read_map_end());
}

#[test]
fn must_read_map_end() {
    let (mut i_prot, _) = test_objects();
    assert!(i_prot.read_map_end().is_ok()); // will blow up if we try to read from empty buffer
}

fn test_objects() -> (
    TCompactInputProtocol<ReadHalf<TBufferChannel>>,
    TCompactOutputProtocol<WriteHalf<TBufferChannel>>,
) {
    let mem = TBufferChannel::with_capacity(80, 80);

    let (r_mem, w_mem) = mem.split().unwrap();

    let i_prot = TCompactInputProtocol::new(r_mem);
    let o_prot = TCompactOutputProtocol::new(w_mem);

    (i_prot, o_prot)
}

#[test]
fn must_read_write_double() {
    let (mut i_prot, mut o_prot) = test_objects();

    #[allow(clippy::approx_constant)]
    let double = 3.141_592_653_589_793;
    o_prot.write_double(double).unwrap();
    copy_write_buffer_to_read_buffer!(o_prot);

    let read_double = i_prot.read_double().unwrap();

    assert!(read_double - double < std::f64::EPSILON);
}

#[test]
fn must_encode_double_as_other_langs() {
    let (_, mut o_prot) = test_objects();
    let expected = [24, 45, 68, 84, 251, 33, 9, 64];

    #[allow(clippy::approx_constant)]
    let double = 3.141_592_653_589_793;
    o_prot.write_double(double).unwrap();

    assert_eq_written_bytes!(o_prot, expected);
}

fn assert_no_write<F>(mut write_fn: F)
where
    F: FnMut(&mut TCompactOutputProtocol<WriteHalf<TBufferChannel>>) -> crate::Result<()>,
{
    let (_, mut o_prot) = test_objects();
    assert!(write_fn(&mut o_prot).is_ok());
    assert_eq!(o_prot.transport.write_bytes().len(), 0);
}
}
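// A minimal, self-contained sketch (illustrative only -- these helper names are
// invented and are not part of the thrift crate) of the field-header rule the
// tests above exercise: when the new field id is 1 to 15 greater than the
// previous one, the delta is packed into the high nibble of a single byte
// alongside the compact type; otherwise the type byte is written alone, followed
// by the zig-zag varint of the absolute field id. The protocol resets the
// previous id to 0 whenever a nested struct begins, which is why the contained
// struct's first field above starts from delta 0 again.

fn zigzag_i16(id: i16) -> u64 {
    // widen first so the shift cannot overflow the i16 range
    let v = i32::from(id);
    (((v << 1) ^ (v >> 31)) as u32) as u64
}

fn write_varint(mut v: u64, out: &mut Vec<u8>) {
    // unsigned LEB128: low seven bits per byte, high bit set on all but the last
    while v >= 0x80 {
        out.push((v & 0x7F) as u8 | 0x80);
        v >>= 7;
    }
    out.push(v as u8);
}

fn encode_field_header(prev_id: i16, id: i16, compact_type: u8, out: &mut Vec<u8>) {
    let delta = id - prev_id;
    if (1..=15).contains(&delta) {
        // short form: field delta | field type
        out.push(((delta as u8) << 4) | compact_type);
    } else {
        // long form: field type, then zig-zag varint field id
        out.push(compact_type);
        write_varint(zigzag_i16(id), out);
    }
}

fn main() {
    // matches the first four bytes of the `expected` array in the nested-struct
    // write test above (compact type nibbles taken from those expected bytes)
    let mut out = Vec::new();
    encode_field_header(0, 1, 0x06, &mut out); // i64, delta 1    -> 0x16
    encode_field_header(1, 9, 0x05, &mut out); // i32, delta 8    -> 0x85
    encode_field_header(0, 24, 0x07, &mut out); // double, id 24  -> 0x07 0x30
    assert_eq!(out, vec![0x16, 0x85, 0x07, 0x30]);
}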
33.266573
100
0.585708
2203ab57b10820c626e99c0c0d1411b1d70004ca
6,739
use clippy_utils::diagnostics::span_lint;
use clippy_utils::{get_trait_def_id, paths, trait_ref_of_method};
use if_chain::if_chain;
use rustc_hir as hir;
use rustc_hir::intravisit::{walk_expr, NestedVisitorMap, Visitor};
use rustc_lint::{LateContext, LateLintPass};
use rustc_middle::hir::map::Map;
use rustc_session::{declare_lint_pass, declare_tool_lint};

declare_clippy_lint! {
    /// **What it does:** Lints for suspicious operations in impls of arithmetic operators, e.g.
    /// subtracting elements in an Add impl.
    ///
    /// **Why is this bad?** This is probably a typo or copy-and-paste error and not intended.
    ///
    /// **Known problems:** None.
    ///
    /// **Example:**
    /// ```ignore
    /// impl Add for Foo {
    ///     type Output = Foo;
    ///
    ///     fn add(self, other: Foo) -> Foo {
    ///         Foo(self.0 - other.0)
    ///     }
    /// }
    /// ```
    pub SUSPICIOUS_ARITHMETIC_IMPL,
    suspicious,
    "suspicious use of operators in impl of arithmetic trait"
}

declare_clippy_lint! {
    /// **What it does:** Lints for suspicious operations in impls of OpAssign, e.g.
    /// subtracting elements in an AddAssign impl.
    ///
    /// **Why is this bad?** This is probably a typo or copy-and-paste error and not intended.
    ///
    /// **Known problems:** None.
    ///
    /// **Example:**
    /// ```ignore
    /// impl AddAssign for Foo {
    ///     fn add_assign(&mut self, other: Foo) {
    ///         *self = *self - other;
    ///     }
    /// }
    /// ```
    pub SUSPICIOUS_OP_ASSIGN_IMPL,
    suspicious,
    "suspicious use of operators in impl of OpAssign trait"
}

declare_lint_pass!(SuspiciousImpl => [SUSPICIOUS_ARITHMETIC_IMPL, SUSPICIOUS_OP_ASSIGN_IMPL]);

impl<'tcx> LateLintPass<'tcx> for SuspiciousImpl {
    fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx hir::Expr<'_>) {
        if let hir::ExprKind::Binary(binop, _, _) | hir::ExprKind::AssignOp(binop, ..) = expr.kind {
            match binop.node {
                hir::BinOpKind::Eq
                | hir::BinOpKind::Lt
                | hir::BinOpKind::Le
                | hir::BinOpKind::Ne
                | hir::BinOpKind::Ge
                | hir::BinOpKind::Gt => return,
                _ => {},
            }

            // Check for more than one binary operation in the implemented function
            // Linting when multiple operations are involved can result in false positives
            let parent_fn = cx.tcx.hir().get_parent_item(expr.hir_id);
            if_chain! {
                if let hir::Node::ImplItem(impl_item) = cx.tcx.hir().get(parent_fn);
                if let hir::ImplItemKind::Fn(_, body_id) = impl_item.kind;
                then {
                    let body = cx.tcx.hir().body(body_id);
                    let mut visitor = BinaryExprVisitor { nb_binops: 0 };
                    walk_expr(&mut visitor, &body.value);
                    if visitor.nb_binops > 1 {
                        return;
                    }
                }
            }

            if let Some(impl_trait) = check_binop(
                cx,
                expr,
                binop.node,
                &[
                    "Add", "Sub", "Mul", "Div", "Rem", "BitAnd", "BitOr", "BitXor", "Shl", "Shr",
                ],
                &[
                    hir::BinOpKind::Add,
                    hir::BinOpKind::Sub,
                    hir::BinOpKind::Mul,
                    hir::BinOpKind::Div,
                    hir::BinOpKind::Rem,
                    hir::BinOpKind::BitAnd,
                    hir::BinOpKind::BitOr,
                    hir::BinOpKind::BitXor,
                    hir::BinOpKind::Shl,
                    hir::BinOpKind::Shr,
                ],
            ) {
                span_lint(
                    cx,
                    SUSPICIOUS_ARITHMETIC_IMPL,
                    binop.span,
                    &format!("suspicious use of binary operator in `{}` impl", impl_trait),
                );
            }

            if let Some(impl_trait) = check_binop(
                cx,
                expr,
                binop.node,
                &[
                    "AddAssign",
                    "SubAssign",
                    "MulAssign",
                    "DivAssign",
                    "BitAndAssign",
                    "BitOrAssign",
                    "BitXorAssign",
                    "RemAssign",
                    "ShlAssign",
                    "ShrAssign",
                ],
                &[
                    hir::BinOpKind::Add,
                    hir::BinOpKind::Sub,
                    hir::BinOpKind::Mul,
                    hir::BinOpKind::Div,
                    hir::BinOpKind::BitAnd,
                    hir::BinOpKind::BitOr,
                    hir::BinOpKind::BitXor,
                    hir::BinOpKind::Rem,
                    hir::BinOpKind::Shl,
                    hir::BinOpKind::Shr,
                ],
            ) {
                span_lint(
                    cx,
                    SUSPICIOUS_OP_ASSIGN_IMPL,
                    binop.span,
                    &format!("suspicious use of binary operator in `{}` impl", impl_trait),
                );
            }
        }
    }
}

fn check_binop(
    cx: &LateContext<'_>,
    expr: &hir::Expr<'_>,
    binop: hir::BinOpKind,
    traits: &[&'static str],
    expected_ops: &[hir::BinOpKind],
) -> Option<&'static str> {
    let mut trait_ids = vec![];
    let [krate, module] = paths::OPS_MODULE;

    for &t in traits {
        let path = [krate, module, t];
        if let Some(trait_id) = get_trait_def_id(cx, &path) {
            trait_ids.push(trait_id);
        } else {
            return None;
        }
    }

    // Get the actually implemented trait
    let parent_fn = cx.tcx.hir().get_parent_item(expr.hir_id);

    if_chain! {
        if let Some(trait_ref) = trait_ref_of_method(cx, parent_fn);
        if let Some(idx) = trait_ids.iter().position(|&tid| tid == trait_ref.path.res.def_id());
        if binop != expected_ops[idx];
        then {
            return Some(traits[idx])
        }
    }

    None
}

struct BinaryExprVisitor {
    nb_binops: u32,
}

impl<'tcx> Visitor<'tcx> for BinaryExprVisitor {
    type Map = Map<'tcx>;

    fn visit_expr(&mut self, expr: &'tcx hir::Expr<'_>) {
        match expr.kind {
            hir::ExprKind::Binary(..)
            | hir::ExprKind::Unary(hir::UnOp::Not | hir::UnOp::Neg, _)
            | hir::ExprKind::AssignOp(..) => self.nb_binops += 1,
            _ => {},
        }

        walk_expr(self, expr);
    }

    fn nested_visit_map(&mut self) -> NestedVisitorMap<Self::Map> {
        NestedVisitorMap::None
    }
}
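// Illustrative trigger case (not part of the lint source; mirrors the lint's own
// doc example): compiling code like the following with clippy enabled is exactly
// the scenario SUSPICIOUS_ARITHMETIC_IMPL targets, since the body of `add` uses
// `-` where `+` was almost certainly intended.

use std::ops::Add;

struct Foo(i32);

impl Add for Foo {
    type Output = Foo;

    fn add(self, other: Foo) -> Foo {
        // clippy: suspicious use of binary operator in `Add` impl
        Foo(self.0 - other.0)
    }
}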
32.244019
100
0.484642
09a122544147704a16436c54460d73e9ef94a341
49,043
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use middle::def; use middle::region; use middle::subst::{VecPerParamSpace,Subst}; use middle::subst; use middle::ty::{BoundRegion, BrAnon, BrNamed}; use middle::ty::{ReEarlyBound, BrFresh, ctxt}; use middle::ty::{ReFree, ReScope, ReInfer, ReStatic, Region, ReEmpty}; use middle::ty::{ReSkolemized, ReVar, BrEnv}; use middle::ty::{mt, Ty, ParamTy}; use middle::ty::{ty_bool, ty_char, ty_struct, ty_enum}; use middle::ty::{ty_err, ty_str, ty_vec, ty_float, ty_bare_fn}; use middle::ty::{ty_param, ty_ptr, ty_rptr, ty_tup}; use middle::ty::{ty_closure}; use middle::ty::{ty_uniq, ty_trait, ty_int, ty_uint, ty_infer}; use middle::ty; use middle::ty_fold::TypeFoldable; use std::collections::HashMap; use std::collections::hash_state::HashState; use std::hash::Hash; use std::rc::Rc; use syntax::abi; use syntax::ast_map; use syntax::codemap::{Span, Pos}; use syntax::parse::token; use syntax::print::pprust; use syntax::ptr::P; use syntax::{ast, ast_util}; use syntax::owned_slice::OwnedSlice; /// Produces a string suitable for debugging output. pub trait Repr<'tcx> { fn repr(&self, tcx: &ctxt<'tcx>) -> String; } /// Produces a string suitable for showing to the user. pub trait UserString<'tcx> : Repr<'tcx> { fn user_string(&self, tcx: &ctxt<'tcx>) -> String; } pub fn note_and_explain_region(cx: &ctxt, prefix: &str, region: ty::Region, suffix: &str) -> Option<Span> { match explain_region_and_span(cx, region) { (ref str, Some(span)) => { cx.sess.span_note( span, &format!("{}{}{}", prefix, *str, suffix)); Some(span) } (ref str, None) => { cx.sess.note( &format!("{}{}{}", prefix, *str, suffix)); None } } } /// When a free region is associated with `item`, how should we describe the item in the error /// message. fn item_scope_tag(item: &ast::Item) -> &'static str { match item.node { ast::ItemImpl(..) => "impl", ast::ItemStruct(..) => "struct", ast::ItemEnum(..) => "enum", ast::ItemTrait(..) => "trait", ast::ItemFn(..) => "function body", _ => "item" } } pub fn explain_region_and_span(cx: &ctxt, region: ty::Region) -> (String, Option<Span>) { return match region { ReScope(scope) => { let new_string; let on_unknown_scope = || { (format!("unknown scope: {:?}. Please report a bug.", scope), None) }; let span = match scope.span(&cx.map) { Some(s) => s, None => return on_unknown_scope(), }; let tag = match cx.map.find(scope.node_id()) { Some(ast_map::NodeBlock(_)) => "block", Some(ast_map::NodeExpr(expr)) => match expr.node { ast::ExprCall(..) => "call", ast::ExprMethodCall(..) => "method call", ast::ExprMatch(_, _, ast::MatchSource::IfLetDesugar { .. }) => "if let", ast::ExprMatch(_, _, ast::MatchSource::WhileLetDesugar) => "while let", ast::ExprMatch(_, _, ast::MatchSource::ForLoopDesugar) => "for", ast::ExprMatch(..) 
=> "match", _ => "expression", }, Some(ast_map::NodeStmt(_)) => "statement", Some(ast_map::NodeItem(it)) => item_scope_tag(&*it), Some(_) | None => { // this really should not happen return on_unknown_scope(); } }; let scope_decorated_tag = match scope { region::CodeExtent::Misc(_) => tag, region::CodeExtent::DestructionScope(_) => { new_string = format!("destruction scope surrounding {}", tag); new_string.as_slice() } region::CodeExtent::Remainder(r) => { new_string = format!("block suffix following statement {}", r.first_statement_index); &*new_string } }; explain_span(cx, scope_decorated_tag, span) } ReFree(ref fr) => { let prefix = match fr.bound_region { BrAnon(idx) => { format!("the anonymous lifetime #{} defined on", idx + 1) } BrFresh(_) => "an anonymous lifetime defined on".to_string(), _ => { format!("the lifetime {} as defined on", bound_region_ptr_to_string(cx, fr.bound_region)) } }; match cx.map.find(fr.scope.node_id) { Some(ast_map::NodeBlock(ref blk)) => { let (msg, opt_span) = explain_span(cx, "block", blk.span); (format!("{} {}", prefix, msg), opt_span) } Some(ast_map::NodeItem(it)) => { let tag = item_scope_tag(&*it); let (msg, opt_span) = explain_span(cx, tag, it.span); (format!("{} {}", prefix, msg), opt_span) } Some(_) | None => { // this really should not happen (format!("{} unknown free region bounded by scope {:?}", prefix, fr.scope), None) } } } ReStatic => { ("the static lifetime".to_string(), None) } ReEmpty => { ("the empty lifetime".to_string(), None) } ReEarlyBound(_, _, _, name) => { (format!("{}", token::get_name(name)), None) } // I believe these cases should not occur (except when debugging, // perhaps) ty::ReInfer(_) | ty::ReLateBound(..) => { (format!("lifetime {:?}", region), None) } }; fn explain_span(cx: &ctxt, heading: &str, span: Span) -> (String, Option<Span>) { let lo = cx.sess.codemap().lookup_char_pos_adj(span.lo); (format!("the {} at {}:{}", heading, lo.line, lo.col.to_usize()), Some(span)) } } pub fn bound_region_ptr_to_string(cx: &ctxt, br: BoundRegion) -> String { bound_region_to_string(cx, "", false, br) } pub fn bound_region_to_string(cx: &ctxt, prefix: &str, space: bool, br: BoundRegion) -> String { let space_str = if space { " " } else { "" }; if cx.sess.verbose() { return format!("{}{}{}", prefix, br.repr(cx), space_str) } match br { BrNamed(_, name) => { format!("{}{}{}", prefix, token::get_name(name), space_str) } BrAnon(_) | BrFresh(_) | BrEnv => prefix.to_string() } } // In general, if you are giving a region error message, // you should use `explain_region()` or, better yet, // `note_and_explain_region()` pub fn region_ptr_to_string(cx: &ctxt, region: Region) -> String { region_to_string(cx, "&", true, region) } pub fn region_to_string(cx: &ctxt, prefix: &str, space: bool, region: Region) -> String { let space_str = if space { " " } else { "" }; if cx.sess.verbose() { return format!("{}{}{}", prefix, region.repr(cx), space_str) } // These printouts are concise. They do not contain all the information // the user might want to diagnose an error, but there is basically no way // to fit that into a short string. Hence the recommendation to use // `explain_region()` or `note_and_explain_region()`. 
match region { ty::ReScope(_) => prefix.to_string(), ty::ReEarlyBound(_, _, _, name) => { token::get_name(name).to_string() } ty::ReLateBound(_, br) => bound_region_to_string(cx, prefix, space, br), ty::ReFree(ref fr) => bound_region_to_string(cx, prefix, space, fr.bound_region), ty::ReInfer(ReSkolemized(_, br)) => { bound_region_to_string(cx, prefix, space, br) } ty::ReInfer(ReVar(_)) => prefix.to_string(), ty::ReStatic => format!("{}'static{}", prefix, space_str), ty::ReEmpty => format!("{}'<empty>{}", prefix, space_str), } } pub fn mutability_to_string(m: ast::Mutability) -> String { match m { ast::MutMutable => "mut ".to_string(), ast::MutImmutable => "".to_string(), } } pub fn mt_to_string<'tcx>(cx: &ctxt<'tcx>, m: &mt<'tcx>) -> String { format!("{}{}", mutability_to_string(m.mutbl), ty_to_string(cx, m.ty)) } pub fn vec_map_to_string<T, F>(ts: &[T], f: F) -> String where F: FnMut(&T) -> String, { let tstrs = ts.iter().map(f).collect::<Vec<String>>(); format!("[{}]", tstrs.connect(", ")) } pub fn ty_to_string<'tcx>(cx: &ctxt<'tcx>, typ: &ty::TyS<'tcx>) -> String { fn bare_fn_to_string<'tcx>(cx: &ctxt<'tcx>, opt_def_id: Option<ast::DefId>, unsafety: ast::Unsafety, abi: abi::Abi, ident: Option<ast::Ident>, sig: &ty::PolyFnSig<'tcx>) -> String { let mut s = String::new(); match unsafety { ast::Unsafety::Normal => {} ast::Unsafety::Unsafe => { s.push_str(&unsafety.to_string()); s.push(' '); } }; if abi != abi::Rust { s.push_str(&format!("extern {} ", abi.to_string())); }; s.push_str("fn"); match ident { Some(i) => { s.push(' '); s.push_str(&token::get_ident(i)); } _ => { } } push_sig_to_string(cx, &mut s, '(', ')', sig); match opt_def_id { Some(def_id) => { s.push_str(" {"); let path_str = ty::item_path_str(cx, def_id); s.push_str(&path_str[..]); s.push_str("}"); } None => { } } s } fn closure_to_string<'tcx>(cx: &ctxt<'tcx>, cty: &ty::ClosureTy<'tcx>) -> String { let mut s = String::new(); s.push_str("[closure"); push_sig_to_string(cx, &mut s, '(', ')', &cty.sig); s.push(']'); s } fn push_sig_to_string<'tcx>(cx: &ctxt<'tcx>, s: &mut String, bra: char, ket: char, sig: &ty::PolyFnSig<'tcx>) { s.push(bra); let strs = sig.0.inputs .iter() .map(|a| ty_to_string(cx, *a)) .collect::<Vec<_>>(); s.push_str(&strs.connect(", ")); if sig.0.variadic { s.push_str(", ..."); } s.push(ket); match sig.0.output { ty::FnConverging(t) => { if !ty::type_is_nil(t) { s.push_str(" -> "); s.push_str(&ty_to_string(cx, t)); } } ty::FnDiverging => { s.push_str(" -> !"); } } } fn infer_ty_to_string(cx: &ctxt, ty: ty::InferTy) -> String { let print_var_ids = cx.sess.verbose(); match ty { ty::TyVar(ref vid) if print_var_ids => vid.repr(cx), ty::IntVar(ref vid) if print_var_ids => vid.repr(cx), ty::FloatVar(ref vid) if print_var_ids => vid.repr(cx), ty::TyVar(_) | ty::IntVar(_) | ty::FloatVar(_) => format!("_"), ty::FreshTy(v) => format!("FreshTy({})", v), ty::FreshIntTy(v) => format!("FreshIntTy({})", v) } } // pretty print the structural type representation: match typ.sty { ty_bool => "bool".to_string(), ty_char => "char".to_string(), ty_int(t) => ast_util::int_ty_to_string(t, None).to_string(), ty_uint(t) => ast_util::uint_ty_to_string(t, None).to_string(), ty_float(t) => ast_util::float_ty_to_string(t).to_string(), ty_uniq(typ) => format!("Box<{}>", ty_to_string(cx, typ)), ty_ptr(ref tm) => { format!("*{} {}", match tm.mutbl { ast::MutMutable => "mut", ast::MutImmutable => "const", }, ty_to_string(cx, tm.ty)) } ty_rptr(r, ref tm) => { let mut buf = region_ptr_to_string(cx, *r); buf.push_str(&mt_to_string(cx, tm)); 
buf } ty_tup(ref elems) => { let strs = elems .iter() .map(|elem| ty_to_string(cx, *elem)) .collect::<Vec<_>>(); match &strs[..] { [ref string] => format!("({},)", string), strs => format!("({})", strs.connect(", ")) } } ty_bare_fn(opt_def_id, ref f) => { bare_fn_to_string(cx, opt_def_id, f.unsafety, f.abi, None, &f.sig) } ty_infer(infer_ty) => infer_ty_to_string(cx, infer_ty), ty_err => "[type error]".to_string(), ty_param(ref param_ty) => { if cx.sess.verbose() { param_ty.repr(cx) } else { param_ty.user_string(cx) } } ty_enum(did, substs) | ty_struct(did, substs) => { let base = ty::item_path_str(cx, did); parameterized(cx, &base, substs, did, &[], || ty::lookup_item_type(cx, did).generics) } ty_trait(ref data) => { data.user_string(cx) } ty::ty_projection(ref data) => { format!("<{} as {}>::{}", data.trait_ref.self_ty().user_string(cx), data.trait_ref.user_string(cx), data.item_name.user_string(cx)) } ty_str => "str".to_string(), ty_closure(ref did, substs) => { let closure_tys = cx.closure_tys.borrow(); closure_tys.get(did).map(|closure_type| { closure_to_string(cx, &closure_type.subst(cx, substs)) }).unwrap_or_else(|| { if did.krate == ast::LOCAL_CRATE { let span = cx.map.span(did.node); format!("[closure {}]", span.repr(cx)) } else { format!("[closure]") } }) } ty_vec(t, sz) => { let inner_str = ty_to_string(cx, t); match sz { Some(n) => format!("[{}; {}]", inner_str, n), None => format!("[{}]", inner_str), } } } } pub fn explicit_self_category_to_str(category: &ty::ExplicitSelfCategory) -> &'static str { match *category { ty::StaticExplicitSelfCategory => "static", ty::ByValueExplicitSelfCategory => "self", ty::ByReferenceExplicitSelfCategory(_, ast::MutMutable) => { "&mut self" } ty::ByReferenceExplicitSelfCategory(_, ast::MutImmutable) => "&self", ty::ByBoxExplicitSelfCategory => "Box<self>", } } pub fn parameterized<'tcx,GG>(cx: &ctxt<'tcx>, base: &str, substs: &subst::Substs<'tcx>, did: ast::DefId, projections: &[ty::ProjectionPredicate<'tcx>], get_generics: GG) -> String where GG : FnOnce() -> ty::Generics<'tcx> { if cx.sess.verbose() { let mut strings = vec![]; match substs.regions { subst::ErasedRegions => { strings.push(format!("..")); } subst::NonerasedRegions(ref regions) => { for region in regions.iter() { strings.push(region.repr(cx)); } } } for ty in substs.types.iter() { strings.push(ty.repr(cx)); } for projection in projections.iter() { strings.push(format!("{}={}", projection.projection_ty.item_name.user_string(cx), projection.ty.user_string(cx))); } return if strings.is_empty() { format!("{}", base) } else { format!("{}<{}>", base, strings.connect(",")) }; } let mut strs = Vec::new(); match substs.regions { subst::ErasedRegions => { } subst::NonerasedRegions(ref regions) => { for &r in regions.iter() { let s = region_to_string(cx, "", false, r); if s.is_empty() { // This happens when the value of the region // parameter is not easily serialized. This may be // because the user omitted it in the first place, // or because it refers to some block in the code, // etc. I'm not sure how best to serialize this. strs.push(format!("'_")); } else { strs.push(s) } } } } // It is important to execute this conditionally, only if -Z // verbose is false. Otherwise, debug logs can sometimes cause // ICEs trying to fetch the generics early in the pipeline. This // is kind of a hacky workaround in that -Z verbose is required to // avoid those ICEs. 
let generics = get_generics(); let has_self = substs.self_ty().is_some(); let tps = substs.types.get_slice(subst::TypeSpace); let ty_params = generics.types.get_slice(subst::TypeSpace); let has_defaults = ty_params.last().map_or(false, |def| def.default.is_some()); let num_defaults = if has_defaults { ty_params.iter().zip(tps.iter()).rev().take_while(|&(def, &actual)| { match def.default { Some(default) => { if !has_self && ty::type_has_self(default) { // In an object type, there is no `Self`, and // thus if the default value references Self, // the user will be required to give an // explicit value. We can't even do the // substitution below to check without causing // an ICE. (#18956). false } else { default.subst(cx, substs) == actual } } None => false } }).count() } else { 0 }; for t in &tps[..tps.len() - num_defaults] { strs.push(ty_to_string(cx, *t)) } for projection in projections { strs.push(format!("{}={}", projection.projection_ty.item_name.user_string(cx), projection.ty.user_string(cx))); } if cx.lang_items.fn_trait_kind(did).is_some() && projections.len() == 1 { let projection_ty = projections[0].ty; let tail = if ty::type_is_nil(projection_ty) { format!("") } else { format!(" -> {}", projection_ty.user_string(cx)) }; format!("{}({}){}", base, if strs[0].starts_with("(") && strs[0].ends_with(",)") { &strs[0][1 .. strs[0].len() - 2] // Remove '(' and ',)' } else if strs[0].starts_with("(") && strs[0].ends_with(")") { &strs[0][1 .. strs[0].len() - 1] // Remove '(' and ')' } else { &strs[0][..] }, tail) } else if strs.len() > 0 { format!("{}<{}>", base, strs.connect(", ")) } else { format!("{}", base) } } pub fn ty_to_short_str<'tcx>(cx: &ctxt<'tcx>, typ: Ty<'tcx>) -> String { let mut s = typ.repr(cx).to_string(); if s.len() >= 32 { s = (&s[0..32]).to_string(); } return s; } impl<'tcx, T:Repr<'tcx>> Repr<'tcx> for Option<T> { fn repr(&self, tcx: &ctxt<'tcx>) -> String { match self { &None => "None".to_string(), &Some(ref t) => t.repr(tcx), } } } impl<'tcx, T:Repr<'tcx>> Repr<'tcx> for P<T> { fn repr(&self, tcx: &ctxt<'tcx>) -> String { (**self).repr(tcx) } } impl<'tcx,T:Repr<'tcx>,U:Repr<'tcx>> Repr<'tcx> for Result<T,U> { fn repr(&self, tcx: &ctxt<'tcx>) -> String { match self { &Ok(ref t) => t.repr(tcx), &Err(ref u) => format!("Err({})", u.repr(tcx)) } } } impl<'tcx> Repr<'tcx> for () { fn repr(&self, _tcx: &ctxt) -> String { "()".to_string() } } impl<'a, 'tcx, T: ?Sized +Repr<'tcx>> Repr<'tcx> for &'a T { fn repr(&self, tcx: &ctxt<'tcx>) -> String { Repr::repr(*self, tcx) } } impl<'tcx, T:Repr<'tcx>> Repr<'tcx> for Rc<T> { fn repr(&self, tcx: &ctxt<'tcx>) -> String { (&**self).repr(tcx) } } impl<'tcx, T:Repr<'tcx>> Repr<'tcx> for Box<T> { fn repr(&self, tcx: &ctxt<'tcx>) -> String { (&**self).repr(tcx) } } fn repr_vec<'tcx, T:Repr<'tcx>>(tcx: &ctxt<'tcx>, v: &[T]) -> String { vec_map_to_string(v, |t| t.repr(tcx)) } impl<'tcx, T:Repr<'tcx>> Repr<'tcx> for [T] { fn repr(&self, tcx: &ctxt<'tcx>) -> String { repr_vec(tcx, self) } } impl<'tcx, T:Repr<'tcx>> Repr<'tcx> for OwnedSlice<T> { fn repr(&self, tcx: &ctxt<'tcx>) -> String { repr_vec(tcx, &self[..]) } } // This is necessary to handle types like Option<~[T]>, for which // autoderef cannot convert the &[T] handler impl<'tcx, T:Repr<'tcx>> Repr<'tcx> for Vec<T> { fn repr(&self, tcx: &ctxt<'tcx>) -> String { repr_vec(tcx, &self[..]) } } impl<'tcx, T:UserString<'tcx>> UserString<'tcx> for Vec<T> { fn user_string(&self, tcx: &ctxt<'tcx>) -> String { let strs: Vec<String> = self.iter().map(|t| t.user_string(tcx)).collect(); 
strs.connect(", ") } } impl<'tcx> Repr<'tcx> for def::Def { fn repr(&self, _tcx: &ctxt) -> String { format!("{:?}", *self) } } /// This curious type is here to help pretty-print trait objects. In /// a trait object, the projections are stored separately from the /// main trait bound, but in fact we want to package them together /// when printing out; they also have separate binders, but we want /// them to share a binder when we print them out. (And the binder /// pretty-printing logic is kind of clever and we don't want to /// reproduce it.) So we just repackage up the structure somewhat. /// /// Right now there is only one trait in an object that can have /// projection bounds, so we just stuff them altogether. But in /// reality we should eventually sort things out better. type TraitAndProjections<'tcx> = (Rc<ty::TraitRef<'tcx>>, Vec<ty::ProjectionPredicate<'tcx>>); impl<'tcx> UserString<'tcx> for TraitAndProjections<'tcx> { fn user_string(&self, tcx: &ctxt<'tcx>) -> String { let &(ref trait_ref, ref projection_bounds) = self; let base = ty::item_path_str(tcx, trait_ref.def_id); parameterized(tcx, &base, trait_ref.substs, trait_ref.def_id, &projection_bounds[..], || ty::lookup_trait_def(tcx, trait_ref.def_id).generics.clone()) } } impl<'tcx> UserString<'tcx> for ty::TyTrait<'tcx> { fn user_string(&self, tcx: &ctxt<'tcx>) -> String { let &ty::TyTrait { ref principal, ref bounds } = self; let mut components = vec![]; let tap: ty::Binder<TraitAndProjections<'tcx>> = ty::Binder((principal.0.clone(), bounds.projection_bounds.iter().map(|x| x.0.clone()).collect())); // Generate the main trait ref, including associated types. components.push(tap.user_string(tcx)); // Builtin bounds. for bound in &bounds.builtin_bounds { components.push(bound.user_string(tcx)); } // Region, if not obviously implied by builtin bounds. 
if bounds.region_bound != ty::ReStatic { // Region bound is implied by builtin bounds: components.push(bounds.region_bound.user_string(tcx)); } components.retain(|s| !s.is_empty()); components.connect(" + ") } } impl<'tcx> Repr<'tcx> for ty::TypeParameterDef<'tcx> { fn repr(&self, _tcx: &ctxt<'tcx>) -> String { format!("TypeParameterDef({:?}, {:?}/{})", self.def_id, self.space, self.index) } } impl<'tcx> Repr<'tcx> for ty::RegionParameterDef { fn repr(&self, tcx: &ctxt) -> String { format!("RegionParameterDef(name={}, def_id={}, bounds={})", token::get_name(self.name), self.def_id.repr(tcx), self.bounds.repr(tcx)) } } impl<'tcx> Repr<'tcx> for ty::TyS<'tcx> { fn repr(&self, tcx: &ctxt<'tcx>) -> String { ty_to_string(tcx, self) } } impl<'tcx> Repr<'tcx> for ty::mt<'tcx> { fn repr(&self, tcx: &ctxt<'tcx>) -> String { mt_to_string(tcx, self) } } impl<'tcx> Repr<'tcx> for subst::Substs<'tcx> { fn repr(&self, tcx: &ctxt<'tcx>) -> String { format!("Substs[types={}, regions={}]", self.types.repr(tcx), self.regions.repr(tcx)) } } impl<'tcx, T:Repr<'tcx>> Repr<'tcx> for subst::VecPerParamSpace<T> { fn repr(&self, tcx: &ctxt<'tcx>) -> String { format!("[{};{};{}]", self.get_slice(subst::TypeSpace).repr(tcx), self.get_slice(subst::SelfSpace).repr(tcx), self.get_slice(subst::FnSpace).repr(tcx)) } } impl<'tcx> Repr<'tcx> for ty::ItemSubsts<'tcx> { fn repr(&self, tcx: &ctxt<'tcx>) -> String { format!("ItemSubsts({})", self.substs.repr(tcx)) } } impl<'tcx> Repr<'tcx> for subst::RegionSubsts { fn repr(&self, tcx: &ctxt) -> String { match *self { subst::ErasedRegions => "erased".to_string(), subst::NonerasedRegions(ref regions) => regions.repr(tcx) } } } impl<'tcx> Repr<'tcx> for ty::BuiltinBounds { fn repr(&self, _tcx: &ctxt) -> String { let mut res = Vec::new(); for b in self { res.push(match b { ty::BoundSend => "Send".to_string(), ty::BoundSized => "Sized".to_string(), ty::BoundCopy => "Copy".to_string(), ty::BoundSync => "Sync".to_string(), }); } res.connect("+") } } impl<'tcx> Repr<'tcx> for ty::ParamBounds<'tcx> { fn repr(&self, tcx: &ctxt<'tcx>) -> String { let mut res = Vec::new(); res.push(self.builtin_bounds.repr(tcx)); for t in &self.trait_bounds { res.push(t.repr(tcx)); } res.connect("+") } } impl<'tcx> Repr<'tcx> for ty::TraitRef<'tcx> { fn repr(&self, tcx: &ctxt<'tcx>) -> String { // when printing out the debug representation, we don't need // to enumerate the `for<...>` etc because the debruijn index // tells you everything you need to know. 
let base = ty::item_path_str(tcx, self.def_id); parameterized(tcx, &base, self.substs, self.def_id, &[], || ty::lookup_trait_def(tcx, self.def_id).generics.clone()) } } impl<'tcx> Repr<'tcx> for ty::TraitDef<'tcx> { fn repr(&self, tcx: &ctxt<'tcx>) -> String { format!("TraitDef(generics={}, trait_ref={})", self.generics.repr(tcx), self.trait_ref.repr(tcx)) } } impl<'tcx> Repr<'tcx> for ast::TraitItem { fn repr(&self, _tcx: &ctxt) -> String { match *self { ast::RequiredMethod(ref data) => format!("RequiredMethod({}, id={})", data.ident, data.id), ast::ProvidedMethod(ref data) => format!("ProvidedMethod(id={})", data.id), ast::TypeTraitItem(ref data) => format!("TypeTraitItem({}, id={})", data.ty_param.ident, data.ty_param.id), } } } impl<'tcx> Repr<'tcx> for ast::Expr { fn repr(&self, _tcx: &ctxt) -> String { format!("expr({}: {})", self.id, pprust::expr_to_string(self)) } } impl<'tcx> Repr<'tcx> for ast::Path { fn repr(&self, _tcx: &ctxt) -> String { format!("path({})", pprust::path_to_string(self)) } } impl<'tcx> UserString<'tcx> for ast::Path { fn user_string(&self, _tcx: &ctxt) -> String { pprust::path_to_string(self) } } impl<'tcx> Repr<'tcx> for ast::Ty { fn repr(&self, _tcx: &ctxt) -> String { format!("type({})", pprust::ty_to_string(self)) } } impl<'tcx> Repr<'tcx> for ast::Item { fn repr(&self, tcx: &ctxt) -> String { format!("item({})", tcx.map.node_to_string(self.id)) } } impl<'tcx> Repr<'tcx> for ast::Lifetime { fn repr(&self, _tcx: &ctxt) -> String { format!("lifetime({}: {})", self.id, pprust::lifetime_to_string(self)) } } impl<'tcx> Repr<'tcx> for ast::Stmt { fn repr(&self, _tcx: &ctxt) -> String { format!("stmt({}: {})", ast_util::stmt_id(self), pprust::stmt_to_string(self)) } } impl<'tcx> Repr<'tcx> for ast::Pat { fn repr(&self, _tcx: &ctxt) -> String { format!("pat({}: {})", self.id, pprust::pat_to_string(self)) } } impl<'tcx> Repr<'tcx> for ty::BoundRegion { fn repr(&self, tcx: &ctxt) -> String { match *self { ty::BrAnon(id) => format!("BrAnon({})", id), ty::BrNamed(id, name) => { format!("BrNamed({}, {})", id.repr(tcx), token::get_name(name)) } ty::BrFresh(id) => format!("BrFresh({})", id), ty::BrEnv => "BrEnv".to_string() } } } impl<'tcx> Repr<'tcx> for ty::Region { fn repr(&self, tcx: &ctxt) -> String { match *self { ty::ReEarlyBound(id, space, index, name) => { format!("ReEarlyBound({}, {:?}, {}, {})", id, space, index, token::get_name(name)) } ty::ReLateBound(binder_id, ref bound_region) => { format!("ReLateBound({:?}, {})", binder_id, bound_region.repr(tcx)) } ty::ReFree(ref fr) => fr.repr(tcx), ty::ReScope(id) => { format!("ReScope({:?})", id) } ty::ReStatic => { "ReStatic".to_string() } ty::ReInfer(ReVar(ref vid)) => { format!("{:?}", vid) } ty::ReInfer(ReSkolemized(id, ref bound_region)) => { format!("re_skolemized({}, {})", id, bound_region.repr(tcx)) } ty::ReEmpty => { "ReEmpty".to_string() } } } } impl<'tcx> UserString<'tcx> for ty::Region { fn user_string(&self, tcx: &ctxt) -> String { region_to_string(tcx, "", false, *self) } } impl<'tcx> Repr<'tcx> for ty::FreeRegion { fn repr(&self, tcx: &ctxt) -> String { format!("ReFree({}, {})", self.scope.repr(tcx), self.bound_region.repr(tcx)) } } impl<'tcx> Repr<'tcx> for region::CodeExtent { fn repr(&self, _tcx: &ctxt) -> String { match *self { region::CodeExtent::Misc(node_id) => format!("Misc({})", node_id), region::CodeExtent::DestructionScope(node_id) => format!("DestructionScope({})", node_id), region::CodeExtent::Remainder(rem) => format!("Remainder({}, {})", rem.block, rem.first_statement_index), } } } 
impl<'tcx> Repr<'tcx> for region::DestructionScopeData { fn repr(&self, _tcx: &ctxt) -> String { match *self { region::DestructionScopeData{ node_id } => format!("DestructionScopeData {{ node_id: {} }}", node_id), } } } impl<'tcx> Repr<'tcx> for ast::DefId { fn repr(&self, tcx: &ctxt) -> String { // Unfortunately, there seems to be no way to attempt to print // a path for a def-id, so I'll just make a best effort for now // and otherwise fallback to just printing the crate/node pair if self.krate == ast::LOCAL_CRATE { match tcx.map.find(self.node) { Some(ast_map::NodeItem(..)) | Some(ast_map::NodeForeignItem(..)) | Some(ast_map::NodeImplItem(..)) | Some(ast_map::NodeTraitItem(..)) | Some(ast_map::NodeVariant(..)) | Some(ast_map::NodeStructCtor(..)) => { return format!( "{:?}:{}", *self, ty::item_path_str(tcx, *self)) } _ => {} } } return format!("{:?}", *self) } } impl<'tcx> Repr<'tcx> for ty::TypeScheme<'tcx> { fn repr(&self, tcx: &ctxt<'tcx>) -> String { format!("TypeScheme {{generics: {}, ty: {}}}", self.generics.repr(tcx), self.ty.repr(tcx)) } } impl<'tcx> Repr<'tcx> for ty::Generics<'tcx> { fn repr(&self, tcx: &ctxt<'tcx>) -> String { format!("Generics(types: {}, regions: {})", self.types.repr(tcx), self.regions.repr(tcx)) } } impl<'tcx> Repr<'tcx> for ty::GenericPredicates<'tcx> { fn repr(&self, tcx: &ctxt<'tcx>) -> String { format!("GenericPredicates(predicates: {})", self.predicates.repr(tcx)) } } impl<'tcx> Repr<'tcx> for ty::InstantiatedPredicates<'tcx> { fn repr(&self, tcx: &ctxt<'tcx>) -> String { format!("InstantiatedPredicates({})", self.predicates.repr(tcx)) } } impl<'tcx> Repr<'tcx> for ty::ItemVariances { fn repr(&self, tcx: &ctxt) -> String { format!("ItemVariances(types={}, \ regions={})", self.types.repr(tcx), self.regions.repr(tcx)) } } impl<'tcx> Repr<'tcx> for ty::Variance { fn repr(&self, _: &ctxt) -> String { // The first `.to_string()` returns a &'static str (it is not an implementation // of the ToString trait). Because of that, we need to call `.to_string()` again // if we want to have a `String`. 
let result: &'static str = (*self).to_string(); result.to_string() } } impl<'tcx> Repr<'tcx> for ty::Method<'tcx> { fn repr(&self, tcx: &ctxt<'tcx>) -> String { format!("method(name: {}, generics: {}, fty: {}, \ explicit_self: {}, vis: {}, def_id: {})", self.name.repr(tcx), self.generics.repr(tcx), self.fty.repr(tcx), self.explicit_self.repr(tcx), self.vis.repr(tcx), self.def_id.repr(tcx)) } } impl<'tcx> Repr<'tcx> for ast::Name { fn repr(&self, _tcx: &ctxt) -> String { token::get_name(*self).to_string() } } impl<'tcx> UserString<'tcx> for ast::Name { fn user_string(&self, _tcx: &ctxt) -> String { token::get_name(*self).to_string() } } impl<'tcx> Repr<'tcx> for ast::Ident { fn repr(&self, _tcx: &ctxt) -> String { token::get_ident(*self).to_string() } } impl<'tcx> Repr<'tcx> for ast::ExplicitSelf_ { fn repr(&self, _tcx: &ctxt) -> String { format!("{:?}", *self) } } impl<'tcx> Repr<'tcx> for ast::Visibility { fn repr(&self, _tcx: &ctxt) -> String { format!("{:?}", *self) } } impl<'tcx> Repr<'tcx> for ty::BareFnTy<'tcx> { fn repr(&self, tcx: &ctxt<'tcx>) -> String { format!("BareFnTy {{unsafety: {}, abi: {}, sig: {}}}", self.unsafety, self.abi.to_string(), self.sig.repr(tcx)) } } impl<'tcx> Repr<'tcx> for ty::FnSig<'tcx> { fn repr(&self, tcx: &ctxt<'tcx>) -> String { format!("fn{} -> {}", self.inputs.repr(tcx), self.output.repr(tcx)) } } impl<'tcx> Repr<'tcx> for ty::FnOutput<'tcx> { fn repr(&self, tcx: &ctxt<'tcx>) -> String { match *self { ty::FnConverging(ty) => format!("FnConverging({0})", ty.repr(tcx)), ty::FnDiverging => "FnDiverging".to_string() } } } impl<'tcx> Repr<'tcx> for ty::MethodCallee<'tcx> { fn repr(&self, tcx: &ctxt<'tcx>) -> String { format!("MethodCallee {{origin: {}, ty: {}, {}}}", self.origin.repr(tcx), self.ty.repr(tcx), self.substs.repr(tcx)) } } impl<'tcx> Repr<'tcx> for ty::MethodOrigin<'tcx> { fn repr(&self, tcx: &ctxt<'tcx>) -> String { match self { &ty::MethodStatic(def_id) => { format!("MethodStatic({})", def_id.repr(tcx)) } &ty::MethodStaticClosure(def_id) => { format!("MethodStaticClosure({})", def_id.repr(tcx)) } &ty::MethodTypeParam(ref p) => { p.repr(tcx) } &ty::MethodTraitObject(ref p) => { p.repr(tcx) } } } } impl<'tcx> Repr<'tcx> for ty::MethodParam<'tcx> { fn repr(&self, tcx: &ctxt<'tcx>) -> String { format!("MethodParam({},{})", self.trait_ref.repr(tcx), self.method_num) } } impl<'tcx> Repr<'tcx> for ty::MethodObject<'tcx> { fn repr(&self, tcx: &ctxt<'tcx>) -> String { format!("MethodObject({},{},{})", self.trait_ref.repr(tcx), self.method_num, self.vtable_index) } } impl<'tcx> Repr<'tcx> for ty::BuiltinBound { fn repr(&self, _tcx: &ctxt) -> String { format!("{:?}", *self) } } impl<'tcx> UserString<'tcx> for ty::BuiltinBound { fn user_string(&self, _tcx: &ctxt) -> String { match *self { ty::BoundSend => "Send".to_string(), ty::BoundSized => "Sized".to_string(), ty::BoundCopy => "Copy".to_string(), ty::BoundSync => "Sync".to_string(), } } } impl<'tcx> Repr<'tcx> for Span { fn repr(&self, tcx: &ctxt) -> String { tcx.sess.codemap().span_to_string(*self).to_string() } } impl<'tcx, A:UserString<'tcx>> UserString<'tcx> for Rc<A> { fn user_string(&self, tcx: &ctxt<'tcx>) -> String { let this: &A = &**self; this.user_string(tcx) } } impl<'tcx> UserString<'tcx> for ty::ParamBounds<'tcx> { fn user_string(&self, tcx: &ctxt<'tcx>) -> String { let mut result = Vec::new(); let s = self.builtin_bounds.user_string(tcx); if !s.is_empty() { result.push(s); } for n in &self.trait_bounds { result.push(n.user_string(tcx)); } result.connect(" + ") } } impl<'tcx> Repr<'tcx> 
for ty::ExistentialBounds<'tcx> {
    fn repr(&self, tcx: &ctxt<'tcx>) -> String {
        let mut res = Vec::new();

        let region_str = self.region_bound.user_string(tcx);
        if !region_str.is_empty() {
            res.push(region_str);
        }

        for bound in &self.builtin_bounds {
            res.push(bound.user_string(tcx));
        }

        for projection_bound in &self.projection_bounds {
            res.push(projection_bound.user_string(tcx));
        }

        res.connect("+")
    }
}

impl<'tcx> UserString<'tcx> for ty::BuiltinBounds {
    fn user_string(&self, tcx: &ctxt) -> String {
        self.iter()
            .map(|bb| bb.user_string(tcx))
            .collect::<Vec<String>>()
            .connect("+")
            .to_string()
    }
}

impl<'tcx, T> UserString<'tcx> for ty::Binder<T>
    where T : UserString<'tcx> + TypeFoldable<'tcx>
{
    fn user_string(&self, tcx: &ctxt<'tcx>) -> String {
        // Replace any anonymous late-bound regions with named
        // variants, using gensym'd identifiers, so that we can
        // clearly differentiate between named and unnamed regions in
        // the output. We'll probably want to tweak this over time to
        // decide just how much information to give.
        let mut names = Vec::new();
        let (unbound_value, _) = ty::replace_late_bound_regions(tcx, self, |br| {
            ty::ReLateBound(ty::DebruijnIndex::new(1), match br {
                ty::BrNamed(_, name) => {
                    names.push(token::get_name(name));
                    br
                }
                ty::BrAnon(_) | ty::BrFresh(_) | ty::BrEnv => {
                    let name = token::gensym("'r");
                    names.push(token::get_name(name));
                    ty::BrNamed(ast_util::local_def(ast::DUMMY_NODE_ID), name)
                }
            })
        });
        let names: Vec<_> = names.iter().map(|s| &s[..]).collect();

        let value_str = unbound_value.user_string(tcx);
        if names.len() == 0 {
            value_str
        } else {
            format!("for<{}> {}", names.connect(","), value_str)
        }
    }
}

impl<'tcx> UserString<'tcx> for ty::TraitRef<'tcx> {
    fn user_string(&self, tcx: &ctxt<'tcx>) -> String {
        let path_str = ty::item_path_str(tcx, self.def_id);
        parameterized(tcx, &path_str, self.substs, self.def_id, &[],
                      || ty::lookup_trait_def(tcx, self.def_id).generics.clone())
    }
}

impl<'tcx> UserString<'tcx> for Ty<'tcx> {
    fn user_string(&self, tcx: &ctxt<'tcx>) -> String {
        ty_to_string(tcx, *self)
    }
}

impl<'tcx> UserString<'tcx> for ast::Ident {
    fn user_string(&self, _tcx: &ctxt) -> String {
        token::get_name(self.name).to_string()
    }
}

impl<'tcx> Repr<'tcx> for abi::Abi {
    fn repr(&self, _tcx: &ctxt) -> String {
        self.to_string()
    }
}

impl<'tcx> UserString<'tcx> for abi::Abi {
    fn user_string(&self, _tcx: &ctxt) -> String {
        self.to_string()
    }
}

impl<'tcx> Repr<'tcx> for ty::UpvarId {
    fn repr(&self, tcx: &ctxt) -> String {
        format!("UpvarId({};`{}`;{})",
                self.var_id,
                ty::local_var_name_str(tcx, self.var_id),
                self.closure_expr_id)
    }
}

impl<'tcx> Repr<'tcx> for ast::Mutability {
    fn repr(&self, _tcx: &ctxt) -> String {
        format!("{:?}", *self)
    }
}

impl<'tcx> Repr<'tcx> for ty::BorrowKind {
    fn repr(&self, _tcx: &ctxt) -> String {
        format!("{:?}", *self)
    }
}

impl<'tcx> Repr<'tcx> for ty::UpvarBorrow {
    fn repr(&self, tcx: &ctxt) -> String {
        format!("UpvarBorrow({}, {})",
                self.kind.repr(tcx),
                self.region.repr(tcx))
    }
}

impl<'tcx> Repr<'tcx> for ty::UpvarCapture {
    fn repr(&self, tcx: &ctxt) -> String {
        match *self {
            ty::UpvarCapture::ByValue => format!("ByValue"),
            ty::UpvarCapture::ByRef(ref data) => format!("ByRef({})", data.repr(tcx)),
        }
    }
}

impl<'tcx> Repr<'tcx> for ty::IntVid {
    fn repr(&self, _tcx: &ctxt) -> String {
        format!("{:?}", self)
    }
}

impl<'tcx> Repr<'tcx> for ty::FloatVid {
    fn repr(&self, _tcx: &ctxt) -> String {
        format!("{:?}", self)
    }
}

impl<'tcx> Repr<'tcx> for ty::RegionVid {
    fn repr(&self, _tcx: &ctxt) -> String {
        format!("{:?}", self)
    }
}

impl<'tcx> Repr<'tcx> for ty::TyVid {
    fn repr(&self, _tcx: &ctxt) -> String {
        format!("{:?}", self)
    }
}

impl<'tcx> Repr<'tcx> for ty::IntVarValue {
    fn repr(&self, _tcx: &ctxt) -> String {
        format!("{:?}", *self)
    }
}

impl<'tcx> Repr<'tcx> for ast::IntTy {
    fn repr(&self, _tcx: &ctxt) -> String {
        format!("{:?}", *self)
    }
}

impl<'tcx> Repr<'tcx> for ast::UintTy {
    fn repr(&self, _tcx: &ctxt) -> String {
        format!("{:?}", *self)
    }
}

impl<'tcx> Repr<'tcx> for ast::FloatTy {
    fn repr(&self, _tcx: &ctxt) -> String {
        format!("{:?}", *self)
    }
}

impl<'tcx> Repr<'tcx> for ty::ExplicitSelfCategory {
    fn repr(&self, _: &ctxt) -> String {
        explicit_self_category_to_str(self).to_string()
    }
}

impl<'tcx> UserString<'tcx> for ParamTy {
    fn user_string(&self, _tcx: &ctxt) -> String {
        format!("{}", token::get_name(self.name))
    }
}

impl<'tcx> Repr<'tcx> for ParamTy {
    fn repr(&self, tcx: &ctxt) -> String {
        let ident = self.user_string(tcx);
        format!("{}/{:?}.{}", ident, self.space, self.idx)
    }
}

impl<'tcx, A:Repr<'tcx>, B:Repr<'tcx>> Repr<'tcx> for (A,B) {
    fn repr(&self, tcx: &ctxt<'tcx>) -> String {
        let &(ref a, ref b) = self;
        format!("({},{})", a.repr(tcx), b.repr(tcx))
    }
}

impl<'tcx, T:Repr<'tcx>> Repr<'tcx> for ty::Binder<T> {
    fn repr(&self, tcx: &ctxt<'tcx>) -> String {
        format!("Binder({})", self.0.repr(tcx))
    }
}

impl<'tcx, S, K, V> Repr<'tcx> for HashMap<K, V, S>
    where K: Hash + Eq + Repr<'tcx>,
          V: Repr<'tcx>,
          S: HashState,
{
    fn repr(&self, tcx: &ctxt<'tcx>) -> String {
        format!("HashMap({})",
                self.iter()
                    .map(|(k,v)| format!("{} => {}", k.repr(tcx), v.repr(tcx)))
                    .collect::<Vec<String>>()
                    .connect(", "))
    }
}

impl<'tcx, T, U> Repr<'tcx> for ty::OutlivesPredicate<T,U>
    where T : Repr<'tcx> + TypeFoldable<'tcx>,
          U : Repr<'tcx> + TypeFoldable<'tcx>,
{
    fn repr(&self, tcx: &ctxt<'tcx>) -> String {
        format!("OutlivesPredicate({}, {})",
                self.0.repr(tcx),
                self.1.repr(tcx))
    }
}

impl<'tcx, T, U> UserString<'tcx> for ty::OutlivesPredicate<T,U>
    where T : UserString<'tcx> + TypeFoldable<'tcx>,
          U : UserString<'tcx> + TypeFoldable<'tcx>,
{
    fn user_string(&self, tcx: &ctxt<'tcx>) -> String {
        format!("{} : {}",
                self.0.user_string(tcx),
                self.1.user_string(tcx))
    }
}

impl<'tcx> Repr<'tcx> for ty::EquatePredicate<'tcx> {
    fn repr(&self, tcx: &ctxt<'tcx>) -> String {
        format!("EquatePredicate({}, {})",
                self.0.repr(tcx),
                self.1.repr(tcx))
    }
}

impl<'tcx> UserString<'tcx> for ty::EquatePredicate<'tcx> {
    fn user_string(&self, tcx: &ctxt<'tcx>) -> String {
        format!("{} == {}",
                self.0.user_string(tcx),
                self.1.user_string(tcx))
    }
}

impl<'tcx> Repr<'tcx> for ty::TraitPredicate<'tcx> {
    fn repr(&self, tcx: &ctxt<'tcx>) -> String {
        format!("TraitPredicate({})",
                self.trait_ref.repr(tcx))
    }
}

impl<'tcx> UserString<'tcx> for ty::TraitPredicate<'tcx> {
    fn user_string(&self, tcx: &ctxt<'tcx>) -> String {
        format!("{} : {}",
                self.trait_ref.self_ty().user_string(tcx),
                self.trait_ref.user_string(tcx))
    }
}

impl<'tcx> UserString<'tcx> for ty::ProjectionPredicate<'tcx> {
    fn user_string(&self, tcx: &ctxt<'tcx>) -> String {
        format!("{} == {}",
                self.projection_ty.user_string(tcx),
                self.ty.user_string(tcx))
    }
}

impl<'tcx> Repr<'tcx> for ty::ProjectionTy<'tcx> {
    fn repr(&self, tcx: &ctxt<'tcx>) -> String {
        format!("<{} as {}>::{}",
                self.trait_ref.substs.self_ty().repr(tcx),
                self.trait_ref.repr(tcx),
                self.item_name.repr(tcx))
    }
}

impl<'tcx> UserString<'tcx> for ty::ProjectionTy<'tcx> {
    fn user_string(&self, tcx: &ctxt<'tcx>) -> String {
        format!("<{} as {}>::{}",
                self.trait_ref.self_ty().user_string(tcx),
                self.trait_ref.user_string(tcx),
                self.item_name.user_string(tcx))
    }
}

impl<'tcx> UserString<'tcx> for ty::Predicate<'tcx> {
    fn user_string(&self, tcx: &ctxt<'tcx>) -> String {
        match *self {
            ty::Predicate::Trait(ref data) => data.user_string(tcx),
            ty::Predicate::Equate(ref predicate) => predicate.user_string(tcx),
            ty::Predicate::RegionOutlives(ref predicate) => predicate.user_string(tcx),
            ty::Predicate::TypeOutlives(ref predicate) => predicate.user_string(tcx),
            ty::Predicate::Projection(ref predicate) => predicate.user_string(tcx),
        }
    }
}
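// A minimal sketch (not from the original file) of how this module's
// `Repr`/`UserString` pairing extends to a new type: `repr` is the verbose,
// debugging-oriented rendering and `user_string` the diagnostic-facing one,
// each delegating to the inner value. `DebugWrapper` is hypothetical; `Repr`,
// `UserString` and `ctxt` are the trait/context types used above.
struct DebugWrapper<T>(T);

impl<'tcx, T: Repr<'tcx>> Repr<'tcx> for DebugWrapper<T> {
    fn repr(&self, tcx: &ctxt<'tcx>) -> String {
        format!("DebugWrapper({})", self.0.repr(tcx))
    }
}

impl<'tcx, T: UserString<'tcx>> UserString<'tcx> for DebugWrapper<T> {
    fn user_string(&self, tcx: &ctxt<'tcx>) -> String {
        // User-facing output hides the wrapper entirely.
        self.0.user_string(tcx)
    }
}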
// ===== next file: hexsha 61d8fc2941afb71a425abc1ddd3ee01461d9ebe6, size 20,730 bytes =====
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! This is an Earley-like parser, without support for in-grammar nonterminals,
//! only by calling out to the main rust parser for named nonterminals (which it
//! commits to fully when it hits one in a grammar). This means that there are no
//! completer or predictor rules, and therefore no need to store one column per
//! token: instead, there's a set of current Earley items and a set of next
//! ones. Instead of NTs, we have a special case for Kleene star. The big-O, in
//! pathological cases, is worse than traditional Earley parsing, but it's an
//! easier fit for Macro-by-Example-style rules, and I think the overhead is
//! lower. (In order to prevent the pathological case, we'd need to lazily
//! construct the resulting `NamedMatch`es at the very end. It'd be a pain,
//! and require more memory to keep around old items, but it would also save
//! overhead)
//!
//! Quick intro to how the parser works:
//!
//! A 'position' is a dot in the middle of a matcher, usually represented as a
//! dot. For example `· a $( a )* a b` is a position, as is `a $( · a )* a b`.
//!
//! The parser walks through the input a character at a time, maintaining a list
//! of items consistent with the current position in the input string: `cur_eis`.
//!
//! As it processes them, it fills up `eof_eis` with items that would be valid if
//! the macro invocation is now over, `bb_eis` with items that are waiting on
//! a Rust nonterminal like `$e:expr`, and `next_eis` with items that are waiting
//! on a particular token. Most of the logic concerns moving the · through the
//! repetitions indicated by Kleene stars. It only advances or calls out to the
//! real Rust parser when no `cur_eis` items remain
//!
//! Example:
//!
//! ```text, ignore
//! Start parsing a a a a b against [· a $( a )* a b].
//!
//! Remaining input: a a a a b
//! next_eis: [· a $( a )* a b]
//!
//! - - - Advance over an a. - - -
//!
//! Remaining input: a a a b
//! cur: [a · $( a )* a b]
//! Descend/Skip (first item).
//! next: [a $( · a )* a b] [a $( a )* · a b].
//!
//! - - - Advance over an a. - - -
//!
//! Remaining input: a a b
//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
//!
//! - - - Advance over an a. - - - (this looks exactly like the last step)
//!
//! Remaining input: a b
//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
//!
//! - - - Advance over an a. - - - (this looks exactly like the last step)
//!
//! Remaining input: b
//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b]
//!
//! - - - Advance over a b. - - -
//!
//! Remaining input: ''
//! eof: [a $( a )* a b ·]
//! ```

pub use self::NamedMatch::*;
pub use self::ParseResult::*;
use self::TokenTreeOrTokenTreeVec::*;

use ast::Ident;
use syntax_pos::{self, BytePos, Span};
use codemap::Spanned;
use errors::FatalError;
use ext::tt::quoted::{self, TokenTree};
use parse::{Directory, ParseSess};
use parse::parser::{PathStyle, Parser};
use parse::token::{self, DocComment, Token, Nonterminal};
use print::pprust;
use symbol::keywords;
use tokenstream::TokenStream;
use util::small_vector::SmallVector;

use std::mem;
use std::rc::Rc;
use std::collections::HashMap;
use std::collections::hash_map::Entry::{Vacant, Occupied};

// To avoid costly uniqueness checks, we require that `MatchSeq` always has
// a nonempty body.

#[derive(Clone)]
enum TokenTreeOrTokenTreeVec {
    Tt(TokenTree),
    TtSeq(Vec<TokenTree>),
}

impl TokenTreeOrTokenTreeVec {
    fn len(&self) -> usize {
        match *self {
            TtSeq(ref v) => v.len(),
            Tt(ref tt) => tt.len(),
        }
    }

    fn get_tt(&self, index: usize) -> TokenTree {
        match *self {
            TtSeq(ref v) => v[index].clone(),
            Tt(ref tt) => tt.get_tt(index),
        }
    }
}

/// an unzipping of `TokenTree`s
#[derive(Clone)]
struct MatcherTtFrame {
    elts: TokenTreeOrTokenTreeVec,
    idx: usize,
}

#[derive(Clone)]
struct MatcherPos {
    stack: Vec<MatcherTtFrame>,
    top_elts: TokenTreeOrTokenTreeVec,
    sep: Option<Token>,
    idx: usize,
    up: Option<Box<MatcherPos>>,
    matches: Vec<Rc<Vec<NamedMatch>>>,
    match_lo: usize,
    match_cur: usize,
    match_hi: usize,
    sp_lo: BytePos,
}

impl MatcherPos {
    fn push_match(&mut self, idx: usize, m: NamedMatch) {
        let matches = Rc::make_mut(&mut self.matches[idx]);
        matches.push(m);
    }
}

pub type NamedParseResult = ParseResult<HashMap<Ident, Rc<NamedMatch>>>;

pub fn count_names(ms: &[TokenTree]) -> usize {
    ms.iter().fold(0, |count, elt| {
        count + match *elt {
            TokenTree::Sequence(_, ref seq) => {
                seq.num_captures
            }
            TokenTree::Delimited(_, ref delim) => {
                count_names(&delim.tts)
            }
            TokenTree::MetaVarDecl(..) => {
                1
            }
            TokenTree::Token(..) => 0,
        }
    })
}

fn initial_matcher_pos(ms: Vec<TokenTree>, lo: BytePos) -> Box<MatcherPos> {
    let match_idx_hi = count_names(&ms[..]);
    let matches = create_matches(match_idx_hi);
    Box::new(MatcherPos {
        stack: vec![],
        top_elts: TtSeq(ms),
        sep: None,
        idx: 0,
        up: None,
        matches: matches,
        match_lo: 0,
        match_cur: 0,
        match_hi: match_idx_hi,
        sp_lo: lo
    })
}

/// `NamedMatch` is a pattern-match result for a single `token::MATCH_NONTERMINAL`:
/// so it is associated with a single ident in a parse, and all
/// `MatchedNonterminal`s in the `NamedMatch` have the same nonterminal type
/// (expr, item, etc). Each leaf in a single `NamedMatch` corresponds to a
/// single `token::MATCH_NONTERMINAL` in the `TokenTree` that produced it.
///
/// The in-memory structure of a particular `NamedMatch` represents the match
/// that occurred when a particular subset of a matcher was applied to a
/// particular token tree.
///
/// The width of each `MatchedSeq` in the `NamedMatch`, and the identity of
/// the `MatchedNonterminal`s, will depend on the token tree it was applied
/// to: each `MatchedSeq` corresponds to a single `TTSeq` in the originating
/// token tree. The depth of the `NamedMatch` structure will therefore depend
/// only on the nesting depth of `ast::TTSeq`s in the originating
/// token tree it was derived from.
#[derive(Debug, Clone)] pub enum NamedMatch { MatchedSeq(Rc<Vec<NamedMatch>>, syntax_pos::Span), MatchedNonterminal(Rc<Nonterminal>) } fn nameize<I: Iterator<Item=NamedMatch>>(sess: &ParseSess, ms: &[TokenTree], mut res: I) -> NamedParseResult { fn n_rec<I: Iterator<Item=NamedMatch>>(sess: &ParseSess, m: &TokenTree, mut res: &mut I, ret_val: &mut HashMap<Ident, Rc<NamedMatch>>) -> Result<(), (syntax_pos::Span, String)> { match *m { TokenTree::Sequence(_, ref seq) => { for next_m in &seq.tts { n_rec(sess, next_m, res.by_ref(), ret_val)? } } TokenTree::Delimited(_, ref delim) => { for next_m in &delim.tts { n_rec(sess, next_m, res.by_ref(), ret_val)?; } } TokenTree::MetaVarDecl(span, _, id) if id.name == keywords::Invalid.name() => { if sess.missing_fragment_specifiers.borrow_mut().remove(&span) { return Err((span, "missing fragment specifier".to_string())); } } TokenTree::MetaVarDecl(sp, bind_name, _) => { match ret_val.entry(bind_name) { Vacant(spot) => { // FIXME(simulacrum): Don't construct Rc here spot.insert(Rc::new(res.next().unwrap())); } Occupied(..) => { return Err((sp, format!("duplicated bind name: {}", bind_name))) } } } TokenTree::Token(..) => (), } Ok(()) } let mut ret_val = HashMap::new(); for m in ms { match n_rec(sess, m, res.by_ref(), &mut ret_val) { Ok(_) => {}, Err((sp, msg)) => return Error(sp, msg), } } Success(ret_val) } pub enum ParseResult<T> { Success(T), /// Arm failed to match. If the second parameter is `token::Eof`, it /// indicates an unexpected end of macro invocation. Otherwise, it /// indicates that no rules expected the given token. Failure(syntax_pos::Span, Token), /// Fatal error (malformed macro?). Abort compilation. Error(syntax_pos::Span, String) } pub fn parse_failure_msg(tok: Token) -> String { match tok { token::Eof => "unexpected end of macro invocation".to_string(), _ => format!("no rules expected the token `{}`", pprust::token_to_string(&tok)), } } /// Perform a token equality check, ignoring syntax context (that is, an unhygienic comparison) fn token_name_eq(t1 : &Token, t2 : &Token) -> bool { if let (Some(id1), Some(id2)) = (t1.ident(), t2.ident()) { id1.name == id2.name } else if let (&token::Lifetime(id1), &token::Lifetime(id2)) = (t1, t2) { id1.name == id2.name } else { *t1 == *t2 } } fn create_matches(len: usize) -> Vec<Rc<Vec<NamedMatch>>> { (0..len).into_iter().map(|_| Rc::new(Vec::new())).collect() } fn inner_parse_loop(sess: &ParseSess, cur_eis: &mut SmallVector<Box<MatcherPos>>, next_eis: &mut Vec<Box<MatcherPos>>, eof_eis: &mut SmallVector<Box<MatcherPos>>, bb_eis: &mut SmallVector<Box<MatcherPos>>, token: &Token, span: syntax_pos::Span) -> ParseResult<()> { while let Some(mut ei) = cur_eis.pop() { // When unzipped trees end, remove them while ei.idx >= ei.top_elts.len() { match ei.stack.pop() { Some(MatcherTtFrame { elts, idx }) => { ei.top_elts = elts; ei.idx = idx + 1; } None => break } } let idx = ei.idx; let len = ei.top_elts.len(); // at end of sequence if idx >= len { // We are repeating iff there is a parent if ei.up.is_some() { // Disregarding the separator, add the "up" case to the tokens that should be // examined. // (remove this condition to make trailing seps ok) if idx == len { let mut new_pos = ei.up.clone().unwrap(); // update matches (the MBE "parse tree") by appending // each tree as a subtree. 
// Only touch the binders we have actually bound for idx in ei.match_lo..ei.match_hi { let sub = ei.matches[idx].clone(); new_pos.push_match(idx, MatchedSeq(sub, Span { lo: ei.sp_lo, ..span })); } new_pos.match_cur = ei.match_hi; new_pos.idx += 1; cur_eis.push(new_pos); } // Check if we need a separator if idx == len && ei.sep.is_some() { // We have a separator, and it is the current token. if ei.sep.as_ref().map(|sep| token_name_eq(token, sep)).unwrap_or(false) { ei.idx += 1; next_eis.push(ei); } } else { // we don't need a separator ei.match_cur = ei.match_lo; ei.idx = 0; cur_eis.push(ei); } } else { // We aren't repeating, so we must be potentially at the end of the input. eof_eis.push(ei); } } else { match ei.top_elts.get_tt(idx) { /* need to descend into sequence */ TokenTree::Sequence(sp, seq) => { if seq.op == quoted::KleeneOp::ZeroOrMore { // Examine the case where there are 0 matches of this sequence let mut new_ei = ei.clone(); new_ei.match_cur += seq.num_captures; new_ei.idx += 1; for idx in ei.match_cur..ei.match_cur + seq.num_captures { new_ei.push_match(idx, MatchedSeq(Rc::new(vec![]), sp)); } cur_eis.push(new_ei); } // Examine the case where there is at least one match of this sequence let matches = create_matches(ei.matches.len()); cur_eis.push(Box::new(MatcherPos { stack: vec![], sep: seq.separator.clone(), idx: 0, matches: matches, match_lo: ei.match_cur, match_cur: ei.match_cur, match_hi: ei.match_cur + seq.num_captures, up: Some(ei), sp_lo: sp.lo, top_elts: Tt(TokenTree::Sequence(sp, seq)), })); } TokenTree::MetaVarDecl(span, _, id) if id.name == keywords::Invalid.name() => { if sess.missing_fragment_specifiers.borrow_mut().remove(&span) { return Error(span, "missing fragment specifier".to_string()); } } TokenTree::MetaVarDecl(..) => { // Built-in nonterminals never start with these tokens, // so we can eliminate them from consideration. match *token { token::CloseDelim(_) => {}, _ => bb_eis.push(ei), } } seq @ TokenTree::Delimited(..) | seq @ TokenTree::Token(_, DocComment(..)) => { let lower_elts = mem::replace(&mut ei.top_elts, Tt(seq)); let idx = ei.idx; ei.stack.push(MatcherTtFrame { elts: lower_elts, idx: idx, }); ei.idx = 0; cur_eis.push(ei); } TokenTree::Token(_, ref t) => { if token_name_eq(t, token) { ei.idx += 1; next_eis.push(ei); } } } } } Success(()) } pub fn parse(sess: &ParseSess, tts: TokenStream, ms: &[TokenTree], directory: Option<Directory>, recurse_into_modules: bool) -> NamedParseResult { let mut parser = Parser::new(sess, tts, directory, recurse_into_modules, true); let mut cur_eis = SmallVector::one(initial_matcher_pos(ms.to_owned(), parser.span.lo)); let mut next_eis = Vec::new(); // or proceed normally loop { let mut bb_eis = SmallVector::new(); // black-box parsed by parser.rs let mut eof_eis = SmallVector::new(); assert!(next_eis.is_empty()); match inner_parse_loop(sess, &mut cur_eis, &mut next_eis, &mut eof_eis, &mut bb_eis, &parser.token, parser.span) { Success(_) => {}, Failure(sp, tok) => return Failure(sp, tok), Error(sp, msg) => return Error(sp, msg), } // inner parse loop handled all cur_eis, so it's empty assert!(cur_eis.is_empty()); /* error messages here could be improved with links to orig. 
rules */ if token_name_eq(&parser.token, &token::Eof) { if eof_eis.len() == 1 { let matches = eof_eis[0].matches.iter_mut().map(|mut dv| { Rc::make_mut(dv).pop().unwrap() }); return nameize(sess, ms, matches); } else if eof_eis.len() > 1 { return Error(parser.span, "ambiguity: multiple successful parses".to_string()); } else { return Failure(parser.span, token::Eof); } } else if (!bb_eis.is_empty() && !next_eis.is_empty()) || bb_eis.len() > 1 { let nts = bb_eis.iter().map(|ei| match ei.top_elts.get_tt(ei.idx) { TokenTree::MetaVarDecl(_, bind, name) => { format!("{} ('{}')", name, bind) } _ => panic!() }).collect::<Vec<String>>().join(" or "); return Error(parser.span, format!( "local ambiguity: multiple parsing options: {}", match next_eis.len() { 0 => format!("built-in NTs {}.", nts), 1 => format!("built-in NTs {} or 1 other option.", nts), n => format!("built-in NTs {} or {} other options.", nts, n), } )); } else if bb_eis.is_empty() && next_eis.is_empty() { return Failure(parser.span, parser.token); } else if !next_eis.is_empty() { /* Now process the next token */ cur_eis.extend(next_eis.drain(..)); parser.bump(); } else /* bb_eis.len() == 1 */ { let mut ei = bb_eis.pop().unwrap(); if let TokenTree::MetaVarDecl(span, _, ident) = ei.top_elts.get_tt(ei.idx) { let match_cur = ei.match_cur; ei.push_match(match_cur, MatchedNonterminal(Rc::new(parse_nt(&mut parser, span, &ident.name.as_str())))); ei.idx += 1; ei.match_cur += 1; } else { unreachable!() } cur_eis.push(ei); } assert!(!cur_eis.is_empty()); } } fn parse_nt<'a>(p: &mut Parser<'a>, sp: Span, name: &str) -> Nonterminal { if name == "tt" { return token::NtTT(p.parse_token_tree()); } // check at the beginning and the parser checks after each bump p.process_potential_macro_variable(); match name { "item" => match panictry!(p.parse_item()) { Some(i) => token::NtItem(i), None => { p.fatal("expected an item keyword").emit(); panic!(FatalError); } }, "block" => token::NtBlock(panictry!(p.parse_block())), "stmt" => match panictry!(p.parse_stmt()) { Some(s) => token::NtStmt(s), None => { p.fatal("expected a statement").emit(); panic!(FatalError); } }, "pat" => token::NtPat(panictry!(p.parse_pat())), "expr" => token::NtExpr(panictry!(p.parse_expr())), "ty" => token::NtTy(panictry!(p.parse_ty())), // this could be handled like a token, since it is one "ident" => match p.token { token::Ident(sn) => { p.bump(); token::NtIdent(Spanned::<Ident>{node: sn, span: p.prev_span}) } _ => { let token_str = pprust::token_to_string(&p.token); p.fatal(&format!("expected ident, found {}", &token_str[..])).emit(); panic!(FatalError) } }, "path" => { token::NtPath(panictry!(p.parse_path(PathStyle::Type))) }, "meta" => token::NtMeta(panictry!(p.parse_meta_item())), "vis" => token::NtVis(panictry!(p.parse_visibility(true))), // this is not supposed to happen, since it has been checked // when compiling the macro. _ => p.span_bug(sp, "invalid fragment specifier") } }
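// A self-contained sketch (not part of the original file) of the item-set
// strategy described in this file's module docs, reduced to plain tokens:
// `cur` holds the positions (dots) compatible with the input consumed so far,
// and every position expecting the current token advances into `next`. The
// real `inner_parse_loop` above layers Kleene-star repetition and nonterminal
// call-outs on top of this same shape.
#[cfg(test)]
mod item_set_sketch {
    fn advance(cur: &[usize], matcher: &[char], tok: char) -> Vec<usize> {
        let mut next = Vec::new();
        for &idx in cur {
            if matcher.get(idx) == Some(&tok) {
                next.push(idx + 1); // the dot moves past the matched token
            }
        }
        next
    }

    #[test]
    fn dots_advance_over_plain_tokens() {
        let matcher = ['a', 'b'];
        let cur = advance(&[0], &matcher, 'a'); // `· a b` becomes `a · b`
        assert_eq!(cur, vec![1]);
        assert!(advance(&cur, &matcher, 'x').is_empty()); // no item expects `x`
    }
}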
// ===== next file: hexsha d787e6c4f819c0dd5f4a2057f738c062bada7608, size 33,206 bytes =====
use crate::charutils::*; use crate::unlikely; use crate::StaticNode; use crate::*; #[cfg(all(target_arch = "x86", feature = "swar-number-parsing"))] use std::arch::x86::*; #[cfg(all(target_arch = "x86_64", feature = "swar-number-parsing"))] use std::arch::x86_64::*; const POWER_OF_TEN: [f64; 632] = [ 1e-323, 1e-322, 1e-321, 1e-320, 1e-319, 1e-318, 1e-317, 1e-316, 1e-315, 1e-314, 1e-313, 1e-312, 1e-311, 1e-310, 1e-309, 1e-308, 1e-307, 1e-306, 1e-305, 1e-304, 1e-303, 1e-302, 1e-301, 1e-300, 1e-299, 1e-298, 1e-297, 1e-296, 1e-295, 1e-294, 1e-293, 1e-292, 1e-291, 1e-290, 1e-289, 1e-288, 1e-287, 1e-286, 1e-285, 1e-284, 1e-283, 1e-282, 1e-281, 1e-280, 1e-279, 1e-278, 1e-277, 1e-276, 1e-275, 1e-274, 1e-273, 1e-272, 1e-271, 1e-270, 1e-269, 1e-268, 1e-267, 1e-266, 1e-265, 1e-264, 1e-263, 1e-262, 1e-261, 1e-260, 1e-259, 1e-258, 1e-257, 1e-256, 1e-255, 1e-254, 1e-253, 1e-252, 1e-251, 1e-250, 1e-249, 1e-248, 1e-247, 1e-246, 1e-245, 1e-244, 1e-243, 1e-242, 1e-241, 1e-240, 1e-239, 1e-238, 1e-237, 1e-236, 1e-235, 1e-234, 1e-233, 1e-232, 1e-231, 1e-230, 1e-229, 1e-228, 1e-227, 1e-226, 1e-225, 1e-224, 1e-223, 1e-222, 1e-221, 1e-220, 1e-219, 1e-218, 1e-217, 1e-216, 1e-215, 1e-214, 1e-213, 1e-212, 1e-211, 1e-210, 1e-209, 1e-208, 1e-207, 1e-206, 1e-205, 1e-204, 1e-203, 1e-202, 1e-201, 1e-200, 1e-199, 1e-198, 1e-197, 1e-196, 1e-195, 1e-194, 1e-193, 1e-192, 1e-191, 1e-190, 1e-189, 1e-188, 1e-187, 1e-186, 1e-185, 1e-184, 1e-183, 1e-182, 1e-181, 1e-180, 1e-179, 1e-178, 1e-177, 1e-176, 1e-175, 1e-174, 1e-173, 1e-172, 1e-171, 1e-170, 1e-169, 1e-168, 1e-167, 1e-166, 1e-165, 1e-164, 1e-163, 1e-162, 1e-161, 1e-160, 1e-159, 1e-158, 1e-157, 1e-156, 1e-155, 1e-154, 1e-153, 1e-152, 1e-151, 1e-150, 1e-149, 1e-148, 1e-147, 1e-146, 1e-145, 1e-144, 1e-143, 1e-142, 1e-141, 1e-140, 1e-139, 1e-138, 1e-137, 1e-136, 1e-135, 1e-134, 1e-133, 1e-132, 1e-131, 1e-130, 1e-129, 1e-128, 1e-127, 1e-126, 1e-125, 1e-124, 1e-123, 1e-122, 1e-121, 1e-120, 1e-119, 1e-118, 1e-117, 1e-116, 1e-115, 1e-114, 1e-113, 1e-112, 1e-111, 1e-110, 1e-109, 1e-108, 1e-107, 1e-106, 1e-105, 1e-104, 1e-103, 1e-102, 1e-101, 1e-100, 1e-99, 1e-98, 1e-97, 1e-96, 1e-95, 1e-94, 1e-93, 1e-92, 1e-91, 1e-90, 1e-89, 1e-88, 1e-87, 1e-86, 1e-85, 1e-84, 1e-83, 1e-82, 1e-81, 1e-80, 1e-79, 1e-78, 1e-77, 1e-76, 1e-75, 1e-74, 1e-73, 1e-72, 1e-71, 1e-70, 1e-69, 1e-68, 1e-67, 1e-66, 1e-65, 1e-64, 1e-63, 1e-62, 1e-61, 1e-60, 1e-59, 1e-58, 1e-57, 1e-56, 1e-55, 1e-54, 1e-53, 1e-52, 1e-51, 1e-50, 1e-49, 1e-48, 1e-47, 1e-46, 1e-45, 1e-44, 1e-43, 1e-42, 1e-41, 1e-40, 1e-39, 1e-38, 1e-37, 1e-36, 1e-35, 1e-34, 1e-33, 1e-32, 1e-31, 1e-30, 1e-29, 1e-28, 1e-27, 1e-26, 1e-25, 1e-24, 1e-23, 1e-22, 1e-21, 1e-20, 1e-19, 1e-18, 1e-17, 1e-16, 1e-15, 1e-14, 1e-13, 1e-12, 1e-11, 1e-10, 1e-9, 1e-8, 1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, 1e20, 1e21, 1e22, 1e23, 1e24, 1e25, 1e26, 1e27, 1e28, 1e29, 1e30, 1e31, 1e32, 1e33, 1e34, 1e35, 1e36, 1e37, 1e38, 1e39, 1e40, 1e41, 1e42, 1e43, 1e44, 1e45, 1e46, 1e47, 1e48, 1e49, 1e50, 1e51, 1e52, 1e53, 1e54, 1e55, 1e56, 1e57, 1e58, 1e59, 1e60, 1e61, 1e62, 1e63, 1e64, 1e65, 1e66, 1e67, 1e68, 1e69, 1e70, 1e71, 1e72, 1e73, 1e74, 1e75, 1e76, 1e77, 1e78, 1e79, 1e80, 1e81, 1e82, 1e83, 1e84, 1e85, 1e86, 1e87, 1e88, 1e89, 1e90, 1e91, 1e92, 1e93, 1e94, 1e95, 1e96, 1e97, 1e98, 1e99, 1e100, 1e101, 1e102, 1e103, 1e104, 1e105, 1e106, 1e107, 1e108, 1e109, 1e110, 1e111, 1e112, 1e113, 1e114, 1e115, 1e116, 1e117, 1e118, 1e119, 1e120, 1e121, 1e122, 1e123, 1e124, 1e125, 
1e126, 1e127, 1e128, 1e129, 1e130, 1e131, 1e132, 1e133, 1e134, 1e135, 1e136, 1e137, 1e138, 1e139, 1e140, 1e141, 1e142, 1e143, 1e144, 1e145, 1e146, 1e147, 1e148, 1e149, 1e150, 1e151, 1e152, 1e153, 1e154, 1e155, 1e156, 1e157, 1e158, 1e159, 1e160, 1e161, 1e162, 1e163, 1e164, 1e165, 1e166, 1e167, 1e168, 1e169, 1e170, 1e171, 1e172, 1e173, 1e174, 1e175, 1e176, 1e177, 1e178, 1e179, 1e180, 1e181, 1e182, 1e183, 1e184, 1e185, 1e186, 1e187, 1e188, 1e189, 1e190, 1e191, 1e192, 1e193, 1e194, 1e195, 1e196, 1e197, 1e198, 1e199, 1e200, 1e201, 1e202, 1e203, 1e204, 1e205, 1e206, 1e207, 1e208, 1e209, 1e210, 1e211, 1e212, 1e213, 1e214, 1e215, 1e216, 1e217, 1e218, 1e219, 1e220, 1e221, 1e222, 1e223, 1e224, 1e225, 1e226, 1e227, 1e228, 1e229, 1e230, 1e231, 1e232, 1e233, 1e234, 1e235, 1e236, 1e237, 1e238, 1e239, 1e240, 1e241, 1e242, 1e243, 1e244, 1e245, 1e246, 1e247, 1e248, 1e249, 1e250, 1e251, 1e252, 1e253, 1e254, 1e255, 1e256, 1e257, 1e258, 1e259, 1e260, 1e261, 1e262, 1e263, 1e264, 1e265, 1e266, 1e267, 1e268, 1e269, 1e270, 1e271, 1e272, 1e273, 1e274, 1e275, 1e276, 1e277, 1e278, 1e279, 1e280, 1e281, 1e282, 1e283, 1e284, 1e285, 1e286, 1e287, 1e288, 1e289, 1e290, 1e291, 1e292, 1e293, 1e294, 1e295, 1e296, 1e297, 1e298, 1e299, 1e300, 1e301, 1e302, 1e303, 1e304, 1e305, 1e306, 1e307, 1e308, ]; //#[inline(always)] #[cfg_attr(not(feature = "no-inline"), inline(always))] pub fn is_integer(c: u8) -> bool { // this gets compiled to (uint8_t)(c - '0') <= 9 on all decent compilers c >= b'0' && c <= b'9' } // We need to check that the character following a zero is valid. This is // probably frequent and it is hard than it looks. We are building all of this // just to differentiate between 0x1 (invalid), 0,1 (valid) 0e1 (valid)... const STRUCTURAL_OR_WHITESPACE_OR_EXPONENT_OR_DECIMAL_NEGATED: [bool; 256] = [ false, true, true, true, true, true, true, true, true, false, false, true, true, false, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, false, true, true, true, true, true, true, true, true, true, true, true, false, true, false, true, true, true, true, true, true, true, true, true, true, true, false, true, true, true, true, true, true, true, true, true, true, false, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, false, true, false, true, true, true, true, true, true, true, false, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, false, true, false, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, ]; #[cfg_attr(not(feature = "no-inline"), inline(always))] fn is_not_structural_or_whitespace_or_exponent_or_decimal(c: u8) -> bool { unsafe { 
        *STRUCTURAL_OR_WHITESPACE_OR_EXPONENT_OR_DECIMAL_NEGATED.get_unchecked(c as usize)
    }
}

// #ifdef _MSC_VER
// check quickly whether the next 8 chars are made of digits
// at a glance, it looks better than Mula's
// http://0x80.pl/articles/swar-digits-validate.html
#[cfg(all(feature = "swar-number-parsing"))]
#[cfg_attr(not(feature = "no-inline"), inline)]
#[allow(clippy::cast_ptr_alignment)]
fn is_made_of_eight_digits_fast(chars: &[u8]) -> bool {
    // We know what we're doing right? :P
    let val: u64 = unsafe { *(chars.as_ptr() as *const u64) };
    // let val: __m64 = *(chars as *const __m64);
    // a branchy method might be faster:
    // return (( val & 0xF0F0F0F0F0F0F0F0 ) == 0x3030303030303030)
    //     && (( (val + 0x0606060606060606) & 0xF0F0F0F0F0F0F0F0 ) ==
    //         0x3030303030303030);
    ((val & 0xF0F0_F0F0_F0F0_F0F0)
        | (((val.wrapping_add(0x0606_0606_0606_0606)) & 0xF0F0_F0F0_F0F0_F0F0) >> 4))
        == 0x3333_3333_3333_3333
}

#[cfg_attr(not(feature = "no-inline"), inline)]
#[cfg(all(
    any(target_arch = "x86", target_arch = "x86_64"),
    feature = "swar-number-parsing"
))]
#[allow(
    clippy::cast_sign_loss,
    clippy::cast_possible_wrap,
    clippy::cast_ptr_alignment
)]
fn parse_eight_digits_unrolled(chars: &[u8]) -> u32 {
    unsafe {
        // this actually computes *16* values so we are being wasteful.
        let ascii0: __m128i = _mm_set1_epi8(b'0' as i8);
        let mul_1_10: __m128i =
            _mm_setr_epi8(10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1);
        let mul_1_100: __m128i = _mm_setr_epi16(100, 1, 100, 1, 100, 1, 100, 1);
        let mul_1_10000: __m128i = _mm_setr_epi16(10000, 1, 10000, 1, 10000, 1, 10000, 1);
        // We know what we're doing right? :P
        let input: __m128i = _mm_sub_epi8(
            _mm_loadu_si128(chars.get_unchecked(0..16).as_ptr() as *const __m128i),
            ascii0,
        );
        let t1: __m128i = _mm_maddubs_epi16(input, mul_1_10);
        let t2: __m128i = _mm_madd_epi16(t1, mul_1_100);
        let t3: __m128i = _mm_packus_epi32(t2, t2);
        let t4: __m128i = _mm_madd_epi16(t3, mul_1_10000);
        _mm_cvtsi128_si32(t4) as u32 // only captures the sum of the first 8 digits, drop the rest
    }
}

#[cfg_attr(not(feature = "no-inline"), inline)]
#[cfg(target_feature = "neon")]
fn parse_eight_digits_unrolled(chars: &[u8]) -> u32 {
    let val: u64 = unsafe { *(chars.as_ptr() as *const u64) };
    // memcpy(&val, chars, sizeof(u64));
    let val = (val & 0x0F0F0F0F0F0F0F0F).wrapping_mul(2561) >> 8;
    let val = (val & 0x00FF00FF00FF00FF).wrapping_mul(6553601) >> 16;
    return ((val & 0x0000FFFF0000FFFF).wrapping_mul(42949672960001) >> 32) as u32;
}

impl<'de> Deserializer<'de> {
    /// called by `parse_number` when we know that the output is a float,
    /// but where there might be some integer overflow. The trick here is to
    /// parse using floats from the start.
    /// Do not call this function directly as it skips some of the checks from
    /// `parse_number`
    ///
    /// This function will almost never be called!!!
    ///
    /// Note: a redesign could avoid this function entirely.
/// #[inline(never)] #[allow( clippy::cast_sign_loss, clippy::cast_possible_wrap, clippy::cast_precision_loss, clippy::too_many_lines )] fn parse_float(idx: usize, p: &[u8], negative: bool) -> Result<StaticNode> { let mut digitcount = if negative { 1 } else { 0 }; let mut i: f64; let mut digit: u8; let mut d; if unsafe { *p.get_unchecked(digitcount) } == b'0' { // 0 cannot be followed by an integer digitcount += 1; i = 0.0; } else { digit = unsafe { *p.get_unchecked(digitcount) } - b'0'; i = f64::from(digit); digitcount += 1; while is_integer(unsafe { *p.get_unchecked(digitcount) }) { digit = unsafe { *p.get_unchecked(digitcount) } - b'0'; i = 10.0 * i + f64::from(digit); digitcount += 1; } } if unsafe { *p.get_unchecked(digitcount) } == b'.' { let mut fraction: u64 = 0; let mut fraction_weight: u64 = 10; digitcount += 1; //let mut fractionalweight: f64 = 1.0; d = unsafe { *p.get_unchecked(digitcount) }; if is_integer(d) { digit = d - b'0'; digitcount += 1; fraction += u64::from(digit); } else { return Err(Self::raw_error( idx + digitcount, d as char, ErrorType::InvalidNumber, )); } while is_integer(unsafe { *p.get_unchecked(digitcount) }) && fraction_weight <= 10_000_000_000_000_000_u64 { digit = unsafe { *p.get_unchecked(digitcount) } - b'0'; digitcount += 1; fraction_weight *= 10; fraction = fraction * 10 + u64::from(digit); } let mut fraction_weight = fraction_weight as f64; let mut fraction = (fraction as f64) / fraction_weight; while is_integer(unsafe { *p.get_unchecked(digitcount) }) { digit = unsafe { *p.get_unchecked(digitcount) } - b'0'; digitcount += 1; fraction_weight *= 10.0; fraction += f64::from(digit) / fraction_weight; } i += fraction; } if (unsafe { *p.get_unchecked(digitcount) } == b'e') || (unsafe { *p.get_unchecked(digitcount) } == b'E') { digitcount += 1; let mut negexp: bool = false; if unsafe { *p.get_unchecked(digitcount) } == b'-' { negexp = true; digitcount += 1; } else if unsafe { *p.get_unchecked(digitcount) } == b'+' { digitcount += 1; } d = unsafe { *p.get_unchecked(digitcount) }; if !is_integer(d) { return Err(Self::raw_error( idx + digitcount, d as char, ErrorType::InvalidNumber, )); } digit = unsafe { *p.get_unchecked(digitcount) } - b'0'; let mut expnumber: u32 = u32::from(digit); // exponential part digitcount += 1; d = unsafe { *p.get_unchecked(digitcount) }; if is_integer(d) { digit = d - b'0'; expnumber = 10 * expnumber + u32::from(digit); digitcount += 1; } d = unsafe { *p.get_unchecked(digitcount) }; if is_integer(d) { digit = d - b'0'; expnumber = 10 * expnumber + u32::from(digit); digitcount += 1; } d = unsafe { *p.get_unchecked(digitcount) }; if is_integer(d) { digit = d - b'0'; expnumber = 10 * expnumber + u32::from(digit); digitcount += 1; } d = unsafe { *p.get_unchecked(digitcount) }; if is_integer(d) { // we refuse to parse this return Err(Self::raw_error( idx + digitcount, d as char, ErrorType::InvalidNumber, )); } let exponent: i32 = if negexp { -(expnumber as i32) } else { expnumber as i32 }; if (exponent > 308) || (exponent < -323) { // we refuse to parse this return Err(Self::raw_error( idx + digitcount, d as char, ErrorType::InvalidExponent, )); } i *= POWER_OF_TEN[(323 + exponent) as usize]; } d = unsafe { *p.get_unchecked(digitcount) }; if is_structural_or_whitespace(d) == 0 { Err(Self::raw_error( idx + digitcount, d as char, ErrorType::InvalidNumber, )) } else { Ok(StaticNode::F64(if negative { -i } else { i })) } } /// called by `parse_number` when we know that the output is an integer, /// but where there might be some integer 
overflow. /// we want to catch overflows! /// Do not call this function directly as it skips some of the checks from /// `parse_number` /// /// This function will almost never be called!!! /// #[cfg(not(feature = "128bit"))] #[inline(never)] #[allow(clippy::cast_possible_wrap)] fn parse_large_integer(idx: usize, buf: &[u8], negative: bool) -> Result<StaticNode> { let mut digitcount = if negative { 1 } else { 0 }; let mut i: u64; let mut d = unsafe { *buf.get_unchecked(digitcount) }; let mut digit: u8; if d == b'0' { digitcount += 1; d = unsafe { *buf.get_unchecked(digitcount) }; i = 0; } else { digit = d - b'0'; i = u64::from(digit); digitcount += 1; d = unsafe { *buf.get_unchecked(digitcount) }; // the is_made_of_eight_digits_fast routine is unlikely to help here because // we rarely see large integer parts like 123456789 while is_integer(d) { digit = d - b'0'; if let Some(i1) = i .checked_mul(10) .and_then(|i| i.checked_add(u64::from(digit))) { i = i1; } else { return Err(Self::raw_error( idx + digitcount, d as char, ErrorType::Overflow, )); } digitcount += 1; d = unsafe { *buf.get_unchecked(digitcount) }; } } if negative && i > 9_223_372_036_854_775_808 { //i64::min_value() * -1 return Err(Self::raw_error( idx + digitcount, d as char, ErrorType::Overflow, )); } if is_structural_or_whitespace(d) == 0 { Err(Self::raw_error( idx + digitcount, d as char, ErrorType::InvalidNumber, )) } else if negative { unsafe { Ok(StaticNode::I64(static_cast_i64!(i.wrapping_neg()))) } } else { Ok(StaticNode::U64(i)) } } #[cfg(feature = "128bit")] #[inline(never)] #[allow(clippy::cast_possible_wrap)] fn parse_large_integer(idx: usize, buf: &[u8], negative: bool) -> Result<StaticNode> { use std::convert::TryFrom; let mut digitcount = if negative { 1 } else { 0 }; let mut i: u128; let mut d = unsafe { *buf.get_unchecked(digitcount) }; let mut digit: u8; if d == b'0' { digitcount += 1; d = unsafe { *buf.get_unchecked(digitcount) }; i = 0; } else { digit = d - b'0'; i = u128::from(digit); digitcount += 1; d = unsafe { *buf.get_unchecked(digitcount) }; // the is_made_of_eight_digits_fast routine is unlikely to help here because // we rarely see large integer parts like 123456789 while is_integer(d) { digit = d - b'0'; if let Some(i1) = i .checked_mul(10) .and_then(|i| i.checked_add(u128::from(digit))) { i = i1; } else { return Err(Self::raw_error( idx + digitcount, d as char, ErrorType::Overflow, )); } digitcount += 1; d = unsafe { *buf.get_unchecked(digitcount) }; } } if negative && i > 170_141_183_460_469_231_731_687_303_715_884_105_728_u128 { //i64::min_value() * -1 return Err(Self::raw_error( idx + digitcount, d as char, ErrorType::Overflow, )); } if is_structural_or_whitespace(d) == 0 { Err(Self::raw_error( idx + digitcount, d as char, ErrorType::InvalidNumber, )) } else if negative { let i = unsafe { static_cast_i128!(i.wrapping_neg()) }; if let Ok(i) = i64::try_from(i) { Ok(StaticNode::I64(i)) } else { Ok(StaticNode::I128(i)) } } else if let Ok(i) = u64::try_from(i) { Ok(StaticNode::U64(i)) } else { Ok(StaticNode::U128(i)) } } // parse the number at buf + offset // define JSON_TEST_NUMBERS for unit testing #[cfg_attr(not(feature = "no-inline"), inline(always))] #[allow( clippy::cast_possible_truncation, clippy::cast_sign_loss, clippy::cast_precision_loss, clippy::cast_possible_wrap, clippy::too_many_lines )] pub(crate) fn parse_number_int(idx: usize, buf: &[u8], negative: bool) -> Result<StaticNode> { let mut byte_count = if negative { 1 } else { 0 }; let mut ignore_count: u8 = 0; //let startdigits: 
*const u8 = p; let mut i: u64; let mut d = unsafe { *buf.get_unchecked(byte_count) }; let mut digit: u8; if d == b'0' { // 0 cannot be followed by an integer byte_count += 1; d = unsafe { *buf.get_unchecked(byte_count) }; if is_not_structural_or_whitespace_or_exponent_or_decimal(d) { return Err(Self::raw_error( idx + byte_count, d as char, ErrorType::InvalidNumber, )); } i = 0; } else { if !is_integer(d) { // must start with an integer return Err(Self::raw_error( idx + byte_count, d as char, ErrorType::InvalidNumber, )); } digit = d - b'0'; i = u64::from(digit); byte_count += 1; d = unsafe { *buf.get_unchecked(byte_count) }; // the is_made_of_eight_digits_fast routine is unlikely to help here because // we rarely see large integer parts like 123456789 while is_integer(d) { digit = d - b'0'; i = i.wrapping_mul(10); if let Some(i1) = i.checked_add(u64::from(digit)) { i = i1; } else { return Err(Self::raw_error( idx + byte_count, d as char, ErrorType::Overflow, )); } //i = 10 * i + u64::from(digit); // might overflow byte_count += 1; d = unsafe { *buf.get_unchecked(byte_count) }; } } let mut exponent: i64 = if d == b'.' { ignore_count += 1; byte_count += 1; d = unsafe { *buf.get_unchecked(byte_count) }; let firstafterperiod = byte_count; if is_integer(d) { digit = d - b'0'; byte_count += 1; i = i.wrapping_mul(10).wrapping_add(u64::from(digit)); } else { return Err(Self::raw_error( idx + byte_count, d as char, ErrorType::InvalidNumber, )); } // this helps if we have lots of decimals! // this turns out to be frequent enough. #[cfg(feature = "swar-number-parsing")] { // FIXME // can we omit this: buf.len() - byte_count >= 8 if is_made_of_eight_digits_fast(unsafe { buf.get_unchecked(byte_count..) }) { i = i.wrapping_mul(100_000_000).wrapping_add(u64::from( parse_eight_digits_unrolled(unsafe { buf.get_unchecked(byte_count..) }), )); byte_count += 8; } } d = unsafe { *buf.get_unchecked(byte_count) }; while is_integer(d) { digit = d - b'0'; i = i.wrapping_mul(10).wrapping_add(u64::from(digit)); byte_count += 1; d = unsafe { *buf.get_unchecked(byte_count) }; } firstafterperiod as i64 - byte_count as i64 } else { 0 }; let mut expnumber: i16 = 0; // exponential part if (d == b'e') || (d == b'E') { ignore_count += 1; byte_count += 1; d = unsafe { *buf.get_unchecked(byte_count) }; let mut negexp: bool = false; if d == b'-' { negexp = true; ignore_count += 1; byte_count += 1; d = unsafe { *buf.get_unchecked(byte_count) }; } else if d == b'+' { ignore_count += 1; byte_count += 1; d = unsafe { *buf.get_unchecked(byte_count) }; } if !is_integer(d) { return Err(Self::raw_error( idx + byte_count, d as char, ErrorType::InvalidNumber, )); } digit = d - b'0'; expnumber = i16::from(digit); byte_count += 1; ignore_count += 1; d = unsafe { *buf.get_unchecked(byte_count) }; if is_integer(d) { digit = d - b'0'; expnumber = 10 * expnumber + i16::from(digit); ignore_count += 1; byte_count += 1; d = unsafe { *buf.get_unchecked(byte_count) }; } if is_integer(d) { digit = d - b'0'; expnumber = 10 * expnumber + i16::from(digit); ignore_count += 1; byte_count += 1; d = unsafe { *buf.get_unchecked(byte_count) }; } if is_integer(d) { // we refuse to parse this return Err(Self::raw_error( idx + byte_count, d as char, ErrorType::InvalidNumber, )); } exponent += i64::from(if negexp { -expnumber } else { expnumber }); } let v = if (exponent != 0) || (expnumber != 0) { if unlikely!((byte_count - ignore_count as usize) >= 19) { // this is uncommon!!! // this is almost never going to get called!!! // we start anew, going slowly!!! 
return Self::parse_float(idx, buf, negative); } /////////// // We want 0.1e1 to be a float. ////////// if i == 0 { StaticNode::F64(0.0) } else { if (exponent > 308) || (exponent < -323) { //FIXME Parse it as a expensive float perhaps return Self::parse_float(idx, buf, negative); } let mut d1: f64 = i as f64; d1 *= POWER_OF_TEN[(323 + exponent) as usize]; StaticNode::F64(if negative { d1 * -1.0 } else { d1 }) } } else { if unlikely!(byte_count >= 18) { // this is uncommon!!! return Self::parse_large_integer(idx, buf, negative); } if negative { unsafe { StaticNode::I64(static_cast_i64!(i.wrapping_neg())) } } else { StaticNode::U64(i) } }; if is_structural_or_whitespace(d) == 0 { Err(Self::raw_error( idx + byte_count, d as char, ErrorType::InvalidNumber, )) } else { Ok(v) } } } #[cfg(test)] mod test { #![allow(clippy::default_trait_access)] use crate::value::owned::to_value; use crate::value::Value as ValueTrait; use float_cmp::approx_eq; #[test] fn bad_exp() { let mut too_big = String::from("1e309"); let mut too_big = unsafe { too_big.as_bytes_mut() }; let v_too_big = to_value(&mut too_big); assert!(v_too_big.is_err()); let mut too_small = String::from("1e-324"); let mut too_small = unsafe { too_small.as_bytes_mut() }; let v_too_small = to_value(&mut too_small); assert!(v_too_small.is_err()); let mut too_big = String::from("1e1000"); let mut too_big = unsafe { too_big.as_bytes_mut() }; let v_too_big = to_value(&mut too_big); assert!(v_too_big.is_err()); let mut too_small = String::from("1e-1000"); let mut too_small = unsafe { too_small.as_bytes_mut() }; let v_too_small = to_value(&mut too_small); assert!(v_too_small.is_err()); let mut too_big = String::from("100000000000000000000000000000000000000000000e309"); let mut too_big = unsafe { too_big.as_bytes_mut() }; let v_too_big = to_value(&mut too_big); assert!(v_too_big.is_err()); let mut too_small = String::from("100000000000000000000000000000000000000000000e-324"); let mut too_small = unsafe { too_small.as_bytes_mut() }; let v_too_small = to_value(&mut too_small); assert!(v_too_small.is_err()); let mut too_big = String::from("100000000000000000000000000000000000000000000e1000"); let mut too_big = unsafe { too_big.as_bytes_mut() }; let v_too_big = to_value(&mut too_big); assert!(v_too_big.is_err()); let mut too_small = String::from("100000000000000000000000000000000000000000000e-1000"); let mut too_small = unsafe { too_small.as_bytes_mut() }; let v_too_small = to_value(&mut too_small); assert!(v_too_small.is_err()); } #[test] fn bad_dot() { let mut i = String::from("1."); let mut i = unsafe { i.as_bytes_mut() }; let r = to_value(&mut i); assert!(r.is_err()); let mut i = String::from("1.e"); let mut i = unsafe { i.as_bytes_mut() }; let r = to_value(&mut i); assert!(r.is_err()); let mut i = String::from("100000000000000000000000000000000000000000000."); let mut i = unsafe { i.as_bytes_mut() }; let r = to_value(&mut i); assert!(r.is_err()); let mut i = String::from("100000000000000000000000000000000000000000000.e"); let mut i = unsafe { i.as_bytes_mut() }; let r = to_value(&mut i); assert!(r.is_err()); } #[test] fn bad_e() { let mut i = String::from("1.0e"); let mut i = unsafe { i.as_bytes_mut() }; let r = to_value(&mut i); assert!(r.is_err()); let mut i = String::from("1.0e"); let mut i = unsafe { i.as_bytes_mut() }; let r = to_value(&mut i); assert!(r.is_err()); let mut i = String::from("100000000000000000000000000000000000000000000.0e"); let mut i = unsafe { i.as_bytes_mut() }; let r = to_value(&mut i); assert!(r.is_err()); let mut i = 
            String::from("100000000000000000000000000000000000000000000.0ee");
        let mut i = unsafe { i.as_bytes_mut() };
        let r = to_value(&mut i);
        assert!(r.is_err());
    }

    #[test]
    fn zero() {
        let mut i = String::from("0");
        let mut i = unsafe { i.as_bytes_mut() };
        let r = to_value(&mut i);
        assert_eq!(r.expect("0"), 0);
    }

    #[test]
    fn float_zero() {
        let mut i = String::from("0e1");
        let mut i = unsafe { i.as_bytes_mut() };
        let r = to_value(&mut i).expect("failed to decode");
        assert!(approx_eq!(f64, r.as_f64().expect("float"), 0.0));
    }

    #[test]
    fn minus_309() {
        let mut i = String::from("-5.96916642387374e-309");
        let mut i = unsafe { i.as_bytes_mut() };
        let r = to_value(&mut i).expect("failed to decode");
        assert!(approx_eq!(
            f64,
            r.as_f64().expect("float"),
            -5.969_166_423_873_74e-309
        ))
    }

    #[allow(clippy::unreadable_literal)]
    #[test]
    fn tiny_float() {
        let mut i = String::from("-0.00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000596916642387374");
        let mut i = unsafe { i.as_bytes_mut() };
        let r = to_value(&mut i).expect("failed to decode");
        assert!(approx_eq!(f64, r.as_f64().expect("float"), -0.00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000596916642387374))
    }
}
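// A standalone check (not part of the original suite) of the SWAR identity
// used by `is_made_of_eight_digits_fast` above: after masking, every byte of
// an all-digit block ORs to 0x33, so a single u64 comparison validates eight
// ASCII digits at once. `from_le_bytes` stands in for the unaligned pointer
// read, which assumes a little-endian target.
#[cfg(test)]
mod swar_sketch {
    fn eight_digits(chars: [u8; 8]) -> bool {
        let val = u64::from_le_bytes(chars);
        ((val & 0xF0F0_F0F0_F0F0_F0F0)
            | (((val.wrapping_add(0x0606_0606_0606_0606)) & 0xF0F0_F0F0_F0F0_F0F0) >> 4))
            == 0x3333_3333_3333_3333
    }

    #[test]
    fn digit_blocks() {
        assert!(eight_digits(*b"12345678"));
        assert!(!eight_digits(*b"1234a678")); // 'a' breaks the mask identity
        assert!(!eight_digits(*b"1234567:")); // ':' is 0x3A, just past '9'
    }
}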
// ===== next file: hexsha 23fcd02b57a09461af2317a8a6a6765769a70659, size 24,294 bytes =====
//! Lookup hir elements using positions in the source code. This is a lossy
//! transformation: in general, a single source might correspond to several
//! modules, functions, etc, due to macros, cfgs and `#[path=]` attributes on
//! modules.
//!
//! So, this module should not be used during hir construction; it exists
//! purely for "IDE needs".
use std::{iter::once, sync::Arc};

use hir_def::{
    body::{
        self,
        scope::{ExprScopes, ScopeId},
        Body, BodySourceMap,
    },
    expr::{ExprId, Pat, PatId},
    path::{ModPath, Path, PathKind},
    resolver::{resolver_for_scope, Resolver, TypeNs, ValueNs},
    AsMacroCall, DefWithBodyId, FieldId, FunctionId, LocalFieldId, ModuleDefId, VariantId,
};
use hir_expand::{hygiene::Hygiene, name::AsName, HirFileId, InFile};
use hir_ty::{
    diagnostics::{record_literal_missing_fields, record_pattern_missing_fields},
    InferenceResult, Interner, Substitution, TyExt, TyLoweringContext,
};
use syntax::{
    ast::{self, AstNode},
    SyntaxNode, TextRange, TextSize,
};

use crate::{
    db::HirDatabase, semantics::PathResolution, Adt, BuiltinType, Const, Field, Function, Local,
    MacroDef, ModuleDef, Static, Struct, Trait, Type, TypeAlias, TypeParam, Variant,
};
use base_db::CrateId;

/// `SourceAnalyzer` is a convenience wrapper which exposes HIR API in terms of
/// original source files. It should not be used inside the HIR itself.
#[derive(Debug)]
pub(crate) struct SourceAnalyzer {
    pub(crate) file_id: HirFileId,
    pub(crate) resolver: Resolver,
    body: Option<Arc<Body>>,
    body_source_map: Option<Arc<BodySourceMap>>,
    infer: Option<Arc<InferenceResult>>,
}

impl SourceAnalyzer {
    pub(crate) fn new_for_body(
        db: &dyn HirDatabase,
        def: DefWithBodyId,
        node: InFile<&SyntaxNode>,
        offset: Option<TextSize>,
    ) -> SourceAnalyzer {
        let (body, source_map) = db.body_with_source_map(def);
        let scopes = db.expr_scopes(def);
        let scope = match offset {
            None => scope_for(&scopes, &source_map, node),
            Some(offset) => scope_for_offset(db, &scopes, &source_map, node.with_value(offset)),
        };
        let resolver = resolver_for_scope(db.upcast(), def, scope);
        SourceAnalyzer {
            resolver,
            body: Some(body),
            body_source_map: Some(source_map),
            infer: Some(db.infer(def)),
            file_id: node.file_id,
        }
    }

    pub(crate) fn new_for_resolver(
        resolver: Resolver,
        node: InFile<&SyntaxNode>,
    ) -> SourceAnalyzer {
        SourceAnalyzer {
            resolver,
            body: None,
            body_source_map: None,
            infer: None,
            file_id: node.file_id,
        }
    }

    fn expr_id(&self, db: &dyn HirDatabase, expr: &ast::Expr) -> Option<ExprId> {
        let src = match expr {
            ast::Expr::MacroCall(call) => {
                self.expand_expr(db, InFile::new(self.file_id, call.clone()))?
} _ => InFile::new(self.file_id, expr.clone()), }; let sm = self.body_source_map.as_ref()?; sm.node_expr(src.as_ref()) } fn pat_id(&self, pat: &ast::Pat) -> Option<PatId> { // FIXME: macros, see `expr_id` let src = InFile { file_id: self.file_id, value: pat }; self.body_source_map.as_ref()?.node_pat(src) } fn expand_expr( &self, db: &dyn HirDatabase, expr: InFile<ast::MacroCall>, ) -> Option<InFile<ast::Expr>> { let macro_file = self.body_source_map.as_ref()?.node_macro_file(expr.as_ref())?; let expanded = db.parse_or_expand(macro_file)?; let res = match ast::MacroCall::cast(expanded.clone()) { Some(call) => self.expand_expr(db, InFile::new(macro_file, call))?, _ => InFile::new(macro_file, ast::Expr::cast(expanded)?), }; Some(res) } pub(crate) fn type_of_expr( &self, db: &dyn HirDatabase, expr: &ast::Expr, ) -> Option<(Type, Option<Type>)> { let expr_id = self.expr_id(db, expr)?; let infer = self.infer.as_ref()?; let coerced = infer .expr_adjustments .get(&expr_id) .and_then(|adjusts| adjusts.last().map(|adjust| adjust.target.clone())); let ty = infer[expr_id].clone(); let mk_ty = |ty| Type::new_with_resolver(db, &self.resolver, ty); mk_ty(ty).zip(Some(coerced.and_then(mk_ty))) } pub(crate) fn type_of_pat( &self, db: &dyn HirDatabase, pat: &ast::Pat, ) -> Option<(Type, Option<Type>)> { let pat_id = self.pat_id(pat)?; let infer = self.infer.as_ref()?; let coerced = infer .pat_adjustments .get(&pat_id) .and_then(|adjusts| adjusts.last().map(|adjust| adjust.target.clone())); let ty = infer[pat_id].clone(); let mk_ty = |ty| Type::new_with_resolver(db, &self.resolver, ty); mk_ty(ty).zip(Some(coerced.and_then(mk_ty))) } pub(crate) fn type_of_self( &self, db: &dyn HirDatabase, param: &ast::SelfParam, ) -> Option<Type> { let src = InFile { file_id: self.file_id, value: param }; let pat_id = self.body_source_map.as_ref()?.node_self_param(src)?; let ty = self.infer.as_ref()?[pat_id].clone(); Type::new_with_resolver(db, &self.resolver, ty) } pub(crate) fn resolve_method_call( &self, db: &dyn HirDatabase, call: &ast::MethodCallExpr, ) -> Option<(FunctionId, Substitution)> { let expr_id = self.expr_id(db, &call.clone().into())?; self.infer.as_ref()?.method_resolution(expr_id) } pub(crate) fn resolve_field( &self, db: &dyn HirDatabase, field: &ast::FieldExpr, ) -> Option<Field> { let expr_id = self.expr_id(db, &field.clone().into())?; self.infer.as_ref()?.field_resolution(expr_id).map(|it| it.into()) } pub(crate) fn resolve_record_field( &self, db: &dyn HirDatabase, field: &ast::RecordExprField, ) -> Option<(Field, Option<Local>, Type)> { let record_expr = ast::RecordExpr::cast(field.syntax().parent().and_then(|p| p.parent())?)?; let expr = ast::Expr::from(record_expr); let expr_id = self.body_source_map.as_ref()?.node_expr(InFile::new(self.file_id, &expr))?; let local_name = field.field_name()?.as_name(); let local = if field.name_ref().is_some() { None } else { let path = ModPath::from_segments(PathKind::Plain, once(local_name.clone())); match self.resolver.resolve_path_in_value_ns_fully(db.upcast(), &path) { Some(ValueNs::LocalBinding(pat_id)) => { Some(Local { pat_id, parent: self.resolver.body_owner()? }) } _ => None, } }; let (_, subst) = self.infer.as_ref()?.type_of_expr.get(expr_id)?.as_adt()?; let variant = self.infer.as_ref()?.variant_resolution_for_expr(expr_id)?; let variant_data = variant.variant_data(db.upcast()); let field = FieldId { parent: variant, local_id: variant_data.field(&local_name)? 
}; let field_ty = db.field_types(variant).get(field.local_id)?.clone().substitute(&Interner, subst); Some((field.into(), local, Type::new_with_resolver(db, &self.resolver, field_ty)?)) } pub(crate) fn resolve_record_pat_field( &self, db: &dyn HirDatabase, field: &ast::RecordPatField, ) -> Option<Field> { let field_name = field.field_name()?.as_name(); let record_pat = ast::RecordPat::cast(field.syntax().parent().and_then(|p| p.parent())?)?; let pat_id = self.pat_id(&record_pat.into())?; let variant = self.infer.as_ref()?.variant_resolution_for_pat(pat_id)?; let variant_data = variant.variant_data(db.upcast()); let field = FieldId { parent: variant, local_id: variant_data.field(&field_name)? }; Some(field.into()) } pub(crate) fn resolve_macro_call( &self, db: &dyn HirDatabase, macro_call: InFile<&ast::MacroCall>, ) -> Option<MacroDef> { let ctx = body::LowerCtx::new(db.upcast(), macro_call.file_id); let path = macro_call.value.path().and_then(|ast| Path::from_src(ast, &ctx))?; self.resolver.resolve_path_as_macro(db.upcast(), path.mod_path()).map(|it| it.into()) } pub(crate) fn resolve_bind_pat_to_const( &self, db: &dyn HirDatabase, pat: &ast::IdentPat, ) -> Option<ModuleDef> { let pat_id = self.pat_id(&pat.clone().into())?; let body = self.body.as_ref()?; let path = match &body[pat_id] { Pat::Path(path) => path, _ => return None, }; let res = resolve_hir_path(db, &self.resolver, path)?; match res { PathResolution::Def(def) => Some(def), _ => None, } } pub(crate) fn resolve_path_as_macro( &self, db: &dyn HirDatabase, path: &ast::Path, ) -> Option<MacroDef> { // This must be a normal source file rather than macro file. let hygiene = Hygiene::new(db.upcast(), self.file_id); let ctx = body::LowerCtx::with_hygiene(db.upcast(), &hygiene); let hir_path = Path::from_src(path.clone(), &ctx)?; resolve_hir_path_as_macro(db, &self.resolver, &hir_path) } pub(crate) fn resolve_path( &self, db: &dyn HirDatabase, path: &ast::Path, ) -> Option<PathResolution> { let parent = || path.syntax().parent(); let mut prefer_value_ns = false; if let Some(path_expr) = parent().and_then(ast::PathExpr::cast) { let expr_id = self.expr_id(db, &path_expr.into())?; let infer = self.infer.as_ref()?; if let Some(assoc) = infer.assoc_resolutions_for_expr(expr_id) { return Some(PathResolution::AssocItem(assoc.into())); } if let Some(VariantId::EnumVariantId(variant)) = infer.variant_resolution_for_expr(expr_id) { return Some(PathResolution::Def(ModuleDef::Variant(variant.into()))); } prefer_value_ns = true; } if let Some(path_pat) = parent().and_then(ast::PathPat::cast) { let pat_id = self.pat_id(&path_pat.into())?; if let Some(assoc) = self.infer.as_ref()?.assoc_resolutions_for_pat(pat_id) { return Some(PathResolution::AssocItem(assoc.into())); } if let Some(VariantId::EnumVariantId(variant)) = self.infer.as_ref()?.variant_resolution_for_pat(pat_id) { return Some(PathResolution::Def(ModuleDef::Variant(variant.into()))); } } if let Some(rec_lit) = parent().and_then(ast::RecordExpr::cast) { let expr_id = self.expr_id(db, &rec_lit.into())?; if let Some(VariantId::EnumVariantId(variant)) = self.infer.as_ref()?.variant_resolution_for_expr(expr_id) { return Some(PathResolution::Def(ModuleDef::Variant(variant.into()))); } } if let Some(pat) = parent() .and_then(ast::RecordPat::cast) .map(ast::Pat::from) .or_else(|| parent().and_then(ast::TupleStructPat::cast).map(ast::Pat::from)) { let pat_id = self.pat_id(&pat)?; if let Some(VariantId::EnumVariantId(variant)) = self.infer.as_ref()?.variant_resolution_for_pat(pat_id) { return 
Some(PathResolution::Def(ModuleDef::Variant(variant.into()))); } } // This must be a normal source file rather than macro file. let hygiene = Hygiene::new(db.upcast(), self.file_id); let ctx = body::LowerCtx::with_hygiene(db.upcast(), &hygiene); let hir_path = Path::from_src(path.clone(), &ctx)?; // Case where path is a qualifier of another path, e.g. foo::bar::Baz where we are // trying to resolve foo::bar. if let Some(outer_path) = parent().and_then(ast::Path::cast) { if let Some(qualifier) = outer_path.qualifier() { if path == &qualifier { return resolve_hir_path_qualifier(db, &self.resolver, &hir_path); } } } // Case where path is a qualifier of a use tree, e.g. foo::bar::{Baz, Qux} where we are // trying to resolve foo::bar. if let Some(use_tree) = parent().and_then(ast::UseTree::cast) { if let Some(qualifier) = use_tree.path() { if path == &qualifier && use_tree.coloncolon_token().is_some() { return resolve_hir_path_qualifier(db, &self.resolver, &hir_path); } } } if parent().map_or(false, |it| ast::Visibility::can_cast(it.kind())) { resolve_hir_path_qualifier(db, &self.resolver, &hir_path) } else { resolve_hir_path_(db, &self.resolver, &hir_path, prefer_value_ns) } } pub(crate) fn record_literal_missing_fields( &self, db: &dyn HirDatabase, literal: &ast::RecordExpr, ) -> Option<Vec<(Field, Type)>> { let krate = self.resolver.krate()?; let body = self.body.as_ref()?; let infer = self.infer.as_ref()?; let expr_id = self.expr_id(db, &literal.clone().into())?; let substs = infer.type_of_expr[expr_id].as_adt()?.1; let (variant, missing_fields, _exhaustive) = record_literal_missing_fields(db, infer, expr_id, &body[expr_id])?; let res = self.missing_fields(db, krate, substs, variant, missing_fields); Some(res) } pub(crate) fn record_pattern_missing_fields( &self, db: &dyn HirDatabase, pattern: &ast::RecordPat, ) -> Option<Vec<(Field, Type)>> { let krate = self.resolver.krate()?; let body = self.body.as_ref()?; let infer = self.infer.as_ref()?; let pat_id = self.pat_id(&pattern.clone().into())?; let substs = infer.type_of_pat[pat_id].as_adt()?.1; let (variant, missing_fields, _exhaustive) = record_pattern_missing_fields(db, infer, pat_id, &body[pat_id])?; let res = self.missing_fields(db, krate, substs, variant, missing_fields); Some(res) } fn missing_fields( &self, db: &dyn HirDatabase, krate: CrateId, substs: &Substitution, variant: VariantId, missing_fields: Vec<LocalFieldId>, ) -> Vec<(Field, Type)> { let field_types = db.field_types(variant); missing_fields .into_iter() .map(|local_id| { let field = FieldId { parent: variant, local_id }; let ty = field_types[local_id].clone().substitute(&Interner, substs); (field.into(), Type::new_with_resolver_inner(db, krate, &self.resolver, ty)) }) .collect() } pub(crate) fn expand( &self, db: &dyn HirDatabase, macro_call: InFile<&ast::MacroCall>, ) -> Option<HirFileId> { let krate = self.resolver.krate()?; let macro_call_id = macro_call.as_call_id(db.upcast(), krate, |path| { self.resolver.resolve_path_as_macro(db.upcast(), &path) })?; Some(macro_call_id.as_file()).filter(|it| it.expansion_level(db.upcast()) < 64) } pub(crate) fn resolve_variant( &self, db: &dyn HirDatabase, record_lit: ast::RecordExpr, ) -> Option<VariantId> { let infer = self.infer.as_ref()?; let expr_id = self.expr_id(db, &record_lit.into())?; infer.variant_resolution_for_expr(expr_id) } } fn scope_for( scopes: &ExprScopes, source_map: &BodySourceMap, node: InFile<&SyntaxNode>, ) -> Option<ScopeId> { node.value .ancestors() .filter_map(ast::Expr::cast) .filter_map(|it| 
source_map.node_expr(InFile::new(node.file_id, &it))) .find_map(|it| scopes.scope_for(it)) } fn scope_for_offset( db: &dyn HirDatabase, scopes: &ExprScopes, source_map: &BodySourceMap, offset: InFile<TextSize>, ) -> Option<ScopeId> { scopes .scope_by_expr() .iter() .filter_map(|(id, scope)| { let source = source_map.expr_syntax(*id).ok()?; // FIXME: correctly handle macro expansion if source.file_id != offset.file_id { return None; } let root = source.file_syntax(db.upcast()); let node = source.value.to_node(&root); Some((node.syntax().text_range(), scope)) }) // find containing scope .min_by_key(|(expr_range, _scope)| { ( !(expr_range.start() <= offset.value && offset.value <= expr_range.end()), expr_range.len(), ) }) .map(|(expr_range, scope)| { adjust(db, scopes, source_map, expr_range, offset).unwrap_or(*scope) }) } // XXX: during completion, cursor might be outside of any particular // expression. Try to figure out the correct scope... fn adjust( db: &dyn HirDatabase, scopes: &ExprScopes, source_map: &BodySourceMap, expr_range: TextRange, offset: InFile<TextSize>, ) -> Option<ScopeId> { let child_scopes = scopes .scope_by_expr() .iter() .filter_map(|(id, scope)| { let source = source_map.expr_syntax(*id).ok()?; // FIXME: correctly handle macro expansion if source.file_id != offset.file_id { return None; } let root = source.file_syntax(db.upcast()); let node = source.value.to_node(&root); Some((node.syntax().text_range(), scope)) }) .filter(|&(range, _)| { range.start() <= offset.value && expr_range.contains_range(range) && range != expr_range }); child_scopes .max_by(|&(r1, _), &(r2, _)| { if r1.contains_range(r2) { std::cmp::Ordering::Greater } else if r2.contains_range(r1) { std::cmp::Ordering::Less } else { r1.start().cmp(&r2.start()) } }) .map(|(_ptr, scope)| *scope) } #[inline] pub(crate) fn resolve_hir_path( db: &dyn HirDatabase, resolver: &Resolver, path: &Path, ) -> Option<PathResolution> { resolve_hir_path_(db, resolver, path, false) } #[inline] pub(crate) fn resolve_hir_path_as_macro( db: &dyn HirDatabase, resolver: &Resolver, path: &Path, ) -> Option<MacroDef> { resolver.resolve_path_as_macro(db.upcast(), path.mod_path()).map(Into::into) } fn resolve_hir_path_( db: &dyn HirDatabase, resolver: &Resolver, path: &Path, prefer_value_ns: bool, ) -> Option<PathResolution> { let types = || { let (ty, unresolved) = match path.type_anchor() { Some(type_ref) => { let (_, res) = TyLoweringContext::new(db, resolver).lower_ty_ext(type_ref); res.map(|ty_ns| (ty_ns, path.segments().first())) } None => { let (ty, remaining) = resolver.resolve_path_in_type_ns(db.upcast(), path.mod_path())?; match remaining { Some(remaining) if remaining > 1 => None, _ => Some((ty, path.segments().get(1))), } } }?; // If we are in a TypeNs for a Trait, and we have an unresolved name, try to resolve it as a type // within the trait's associated types. 
if let (Some(unresolved), &TypeNs::TraitId(trait_id)) = (&unresolved, &ty) { if let Some(type_alias_id) = db.trait_data(trait_id).associated_type_by_name(&unresolved.name) { return Some(PathResolution::Def(ModuleDefId::from(type_alias_id).into())); } } let res = match ty { TypeNs::SelfType(it) => PathResolution::SelfType(it.into()), TypeNs::GenericParam(id) => PathResolution::TypeParam(TypeParam { id }), TypeNs::AdtSelfType(it) | TypeNs::AdtId(it) => { PathResolution::Def(Adt::from(it).into()) } TypeNs::EnumVariantId(it) => PathResolution::Def(Variant::from(it).into()), TypeNs::TypeAliasId(it) => PathResolution::Def(TypeAlias::from(it).into()), TypeNs::BuiltinType(it) => PathResolution::Def(BuiltinType::from(it).into()), TypeNs::TraitId(it) => PathResolution::Def(Trait::from(it).into()), }; match unresolved { Some(unresolved) => res .assoc_type_shorthand_candidates(db, |name, alias| { (name == unresolved.name).then(|| alias) }) .map(TypeAlias::from) .map(Into::into) .map(PathResolution::Def), None => Some(res), } }; let body_owner = resolver.body_owner(); let values = || { resolver.resolve_path_in_value_ns_fully(db.upcast(), path.mod_path()).and_then(|val| { let res = match val { ValueNs::LocalBinding(pat_id) => { let var = Local { parent: body_owner?, pat_id }; PathResolution::Local(var) } ValueNs::FunctionId(it) => PathResolution::Def(Function::from(it).into()), ValueNs::ConstId(it) => PathResolution::Def(Const::from(it).into()), ValueNs::StaticId(it) => PathResolution::Def(Static::from(it).into()), ValueNs::StructId(it) => PathResolution::Def(Struct::from(it).into()), ValueNs::EnumVariantId(it) => PathResolution::Def(Variant::from(it).into()), ValueNs::ImplSelf(impl_id) => PathResolution::SelfType(impl_id.into()), ValueNs::GenericParam(it) => PathResolution::ConstParam(it.into()), }; Some(res) }) }; let items = || { resolver .resolve_module_path_in_items(db.upcast(), path.mod_path()) .take_types() .map(|it| PathResolution::Def(it.into())) }; let macros = || { resolver .resolve_path_as_macro(db.upcast(), path.mod_path()) .map(|def| PathResolution::Macro(def.into())) }; if prefer_value_ns { values().or_else(types) } else { types().or_else(values) } .or_else(items) .or_else(macros) } /// Resolves a path where we know it is a qualifier of another path. /// /// For example, if we have: /// ``` /// mod my { /// pub mod foo { /// struct Bar; /// } /// /// pub fn foo() {} /// } /// ``` /// then we know that `foo` in `my::foo::Bar` refers to the module, not the function. fn resolve_hir_path_qualifier( db: &dyn HirDatabase, resolver: &Resolver, path: &Path, ) -> Option<PathResolution> { let items = resolver .resolve_module_path_in_items(db.upcast(), path.mod_path()) .take_types() .map(|it| PathResolution::Def(it.into())); if items.is_some() { return items; } resolver.resolve_path_in_type_ns_fully(db.upcast(), path.mod_path()).map(|ty| match ty { TypeNs::SelfType(it) => PathResolution::SelfType(it.into()), TypeNs::GenericParam(id) => PathResolution::TypeParam(TypeParam { id }), TypeNs::AdtSelfType(it) | TypeNs::AdtId(it) => PathResolution::Def(Adt::from(it).into()), TypeNs::EnumVariantId(it) => PathResolution::Def(Variant::from(it).into()), TypeNs::TypeAliasId(it) => PathResolution::Def(TypeAlias::from(it).into()), TypeNs::BuiltinType(it) => PathResolution::Def(BuiltinType::from(it).into()), TypeNs::TraitId(it) => PathResolution::Def(Trait::from(it).into()), }) }
37.033537
105
0.573228
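The `scope_for_offset` body in the record above selects the innermost enclosing expression by minimising a `(does-not-contain, length)` key. Below is a minimal, self-contained sketch of that selection trick on plain `(start, end)` ranges; the ranges and offset are made-up illustration, not rust-analyzer data:

// Minimal sketch of the containing-scope selection used above: among
// candidate ranges, prefer ones that contain `offset` (the `!contains`
// bool sorts `false` first), then take the shortest such range.
fn innermost(ranges: &[(u32, u32)], offset: u32) -> Option<(u32, u32)> {
    ranges
        .iter()
        .copied()
        .min_by_key(|&(start, end)| {
            let contains = start <= offset && offset <= end;
            (!contains, end - start) // false < true, so containing ranges win
        })
        .filter(|&(start, end)| start <= offset && offset <= end)
}

fn main() {
    let ranges = [(0, 100), (10, 40), (12, 20)];
    // (12, 20) is the innermost range containing offset 15.
    assert_eq!(innermost(&ranges, 15), Some((12, 20)));
}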
5b2593a0bb8f9ee436ea6891e31f1b95977a1116
818
use std::fs;

use sg_syntax::{dump_document, lsif_index};

fn main() {
    if let Some(path) = std::env::args().nth(1) {
        let contents = match fs::read_to_string(&path) {
            Ok(contents) => contents,
            Err(err) => {
                eprintln!("Failed to read path: {:?}. {}", path, err);
                return;
            }
        };

        // let language = determine_language();
        let document = match lsif_index("go", &contents) {
            Ok(document) => document,
            Err(err) => {
                eprintln!("Failed to index document: {:?}", err);
                return;
            }
        };

        println!("\n\n{}", dump_document(document, &contents));
        // println!("{}", dump_document())
    } else {
        panic!("Must pass a filepath");
    }
}
27.266667
70
0.469438
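The `lsif_index` call above hardcodes `"go"`, with language detection left as a commented-out stub. A minimal sketch of what an extension-based `determine_language` could look like; the function name comes from the comment in the file, and the extension-to-language mapping is an assumption, not sg_syntax's actual API:

use std::path::Path;

// Hypothetical language detection by file extension; the mapping below is
// illustrative only.
fn determine_language(path: &str) -> Option<&'static str> {
    match Path::new(path).extension()?.to_str()? {
        "go" => Some("go"),
        "rs" => Some("rust"),
        "py" => Some("python"),
        _ => None,
    }
}

fn main() {
    assert_eq!(determine_language("main.go"), Some("go"));
    assert_eq!(determine_language("lib.rs"), Some("rust"));
    assert_eq!(determine_language("README"), None);
}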
220f19e44b123aebb6d8a8b42df3f5b671a418fa
16
Negocio.Estoque
8
15
0.875
67bead5fb1dd01992173ae1c9fd920ca5a499203
3,803
//! Tests auto-converted from "sass-spec/spec/css/keyframes.hrx" #[allow(unused)] fn runner() -> crate::TestRunner { super::runner() } mod bubble { #[allow(unused)] use super::runner; #[test] #[ignore] // wrong result fn empty() { assert_eq!( runner().ok("// Regression test for sass/dart-sass#611.\ \na {\ \n @keyframes {/**/}\ \n}\n"), "@keyframes {\ \n /**/\ \n}\n" ); } #[test] #[ignore] // wrong result fn rules() { assert_eq!( runner().ok("// Regression test for sass/libsass#472\ \na {\ \n b: c;\ \n @keyframes d {\ \n to {\ \n e: f;\ \n }\ \n }\ \n}\n"), "a {\ \n b: c;\ \n}\ \n@keyframes d {\ \n to {\ \n e: f;\ \n }\ \n}\n" ); } } mod name { #[allow(unused)] use super::runner; #[test] fn interpolated() { assert_eq!( runner().ok("$a: b;\ \n@keyframes #{$a} {\ \n to {\ \n c: d;\ \n }\ \n}\n"), "@keyframes b {\ \n to {\ \n c: d;\ \n }\ \n}\n" ); } #[test] fn variable_like() { assert_eq!( runner().ok("$a: b;\ \n@keyframes $a {\ \n to {\ \n c: d;\ \n }\ \n}\n"), "@keyframes $a {\ \n to {\ \n c: d;\ \n }\ \n}\n" ); } } mod selector { #[allow(unused)] use super::runner; #[test] fn from() { assert_eq!( runner().ok("@keyframes a {\ \n from {\ \n c: d;\ \n }\ \n}\n"), "@keyframes a {\ \n from {\ \n c: d;\ \n }\ \n}\n" ); } #[test] fn interpolated() { assert_eq!( runner().ok("@keyframes a {\ \n $b: 10%;\ \n #{$b} {\ \n c: d;\ \n }\ \n}\n"), "@keyframes a {\ \n 10% {\ \n c: d;\ \n }\ \n}\n" ); } #[test] fn list() { assert_eq!( runner().ok("@keyframes a {\ \n from, 15%, to {\ \n c: d;\ \n }\ \n}\n"), "@keyframes a {\ \n from, 15%, to {\ \n c: d;\ \n }\ \n}\n" ); } mod percentage { #[allow(unused)] use super::runner; #[test] fn double() { assert_eq!( runner().ok("@keyframes a {\ \n 10.3% {\ \n c: d;\ \n }\ \n}\n"), "@keyframes a {\ \n 10.3% {\ \n c: d;\ \n }\ \n}\n" ); } #[test] fn int() { assert_eq!( runner().ok("@keyframes a {\ \n 10% {\ \n c: d;\ \n }\ \n}\n"), "@keyframes a {\ \n 10% {\ \n c: d;\ \n }\ \n}\n" ); } } #[test] fn to() { assert_eq!( runner().ok("@keyframes a {\ \n to {\ \n c: d;\ \n }\ \n}\n"), "@keyframes a {\ \n to {\ \n c: d;\ \n }\ \n}\n" ); } }
20.336898
68
0.272154
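The test strings in the record above use Rust's backslash line-continuation together with explicit `\n` escapes, so the Sass input reads vertically in the source while the Rust file's own indentation never leaks into the string. A minimal sketch of that convention:

fn main() {
    // A `\` before a newline swallows the newline and the next line's
    // leading whitespace, so only the explicit `\n` escapes end up in
    // the string.
    let scss = "a {\
                \n  b: c;\
                \n}\n";
    assert_eq!(scss, "a {\n  b: c;\n}\n");
}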
9b66ca6cc55c81a5691308fc39c73e5d0b0831bd
3,355
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
pub struct StreamEffectiveDate {
    /// StreamEffectiveDateUnadjusted
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "40907")]
    pub stream_effective_date_unadjusted: Option<fix_common::LocalMktDate>,
    /// When specified, this overrides the business day convention defined in the DateAdjustment component in Instrument. The specified
    /// value would be specific to this instance of the effective date of the stream.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(deserialize_with = "fix_common::workarounds::from_opt_str")] // https://github.com/serde-rs/serde/issues/1183
    #[serde(default)]
    #[serde(rename = "40908")]
    pub stream_effective_date_business_day_convention: Option<i32>,
    /// When specified, this overrides the business centers defined in the DateAdjustment component in Instrument. The specified values
    /// would be specific to this instance of the effective date of the stream.
    #[serde(flatten)]
    pub stream_effective_business_center_grp: Option<super::stream_effective_business_center_grp::StreamEffectiveBusinessCenterGrp>,
    /// StreamEffectiveDateRelativeTo
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "40910")]
    pub stream_effective_date_relative_to: Option<StreamEffectiveDateRelativeTo>,
    /// Conditionally required when StreamEffectiveDateOffsetUnit(40912) is specified.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(deserialize_with = "fix_common::workarounds::from_opt_str")] // https://github.com/serde-rs/serde/issues/1183
    #[serde(default)]
    #[serde(rename = "40911")]
    pub stream_effective_date_offset_period: Option<i32>,
    /// Conditionally required when StreamEffectiveDateOffsetPeriod(40911) is specified.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "40912")]
    pub stream_effective_date_offset_unit: Option<String>,
    /// StreamEffectiveDateOffsetDayType
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(deserialize_with = "fix_common::workarounds::from_opt_str")] // https://github.com/serde-rs/serde/issues/1183
    #[serde(default)]
    #[serde(rename = "40913")]
    pub stream_effective_date_offset_day_type: Option<i32>,
    /// StreamEffectiveDateAdjusted
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "40914")]
    pub stream_effective_date_adjusted: Option<fix_common::LocalMktDate>,
}

#[derive(Serialize, Deserialize, Clone, Copy, Debug, PartialEq)]
pub enum StreamEffectiveDateRelativeTo {
    /// Trade date
    #[serde(rename = "0")]
    TradeDate,
    /// Settlement date
    #[serde(rename = "1")]
    SettlementDate,
    /// Effective date
    #[serde(rename = "2")]
    EffectiveDate,
    /// Calculation period start date
    #[serde(rename = "3")]
    CalculationPeriodStartDate,
    /// Calculation period end date
    #[serde(rename = "4")]
    CalculationPeriodEndDate,
    /// Reset date
    #[serde(rename = "5")]
    ResetDate,
    /// Last pricing date
    #[serde(rename = "6")]
    LastPricingDate,
    /// Valuation date
    #[serde(rename = "7")]
    ValuationDate,
    /// Cash settlement date
    #[serde(rename = "8")]
    CashSettlementDate,
    /// Option exercise start date
    #[serde(rename = "9")]
    OptionExerciseStartDate,
}

impl Default for StreamEffectiveDateRelativeTo {
    fn default() -> Self {
        StreamEffectiveDateRelativeTo::TradeDate
    }
}
39.011628
132
0.755887
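The struct above leans on `#[serde(rename = "...")]` to map FIX tag numbers to field names and `skip_serializing_if` to drop absent options. A minimal round-trip sketch of that pattern with a cut-down struct, assuming `serde` (with the `derive` feature) and `serde_json` as dependencies; serde_json stands in here purely for illustration, since the real crate targets FIX encodings:

use serde::{Deserialize, Serialize};

// Cut-down illustration of the tag-number renaming pattern used above.
#[derive(Serialize, Deserialize, Debug, Default, PartialEq)]
struct MiniStream {
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "40907")]
    unadjusted: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "40908")]
    convention: Option<i32>,
}

fn main() {
    let v = MiniStream { unadjusted: Some("20240102".into()), convention: None };
    let json = serde_json::to_string(&v).unwrap();
    // `convention` is skipped entirely, and the field keys are tag numbers.
    assert_eq!(json, r#"{"40907":"20240102"}"#);
    let back: MiniStream = serde_json::from_str(&json).unwrap();
    assert_eq!(back, v);
}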
1cbf53d874d37f3a5e06d41b3834d1425bfe301f
901
use panoptis_client::rpc_client::RpcClient;
use solana_sdk::{clock::DEFAULT_MS_PER_SLOT, commitment_config::CommitmentConfig, pubkey::Pubkey};
use std::{thread::sleep, time::Duration};

pub fn check_recent_balance(expected_balance: u64, client: &RpcClient, pubkey: &Pubkey) {
    (0..5).for_each(|tries| {
        let balance = client
            .get_balance_with_commitment(pubkey, CommitmentConfig::processed())
            .unwrap()
            .value;
        if balance == expected_balance {
            return;
        }
        if tries == 4 {
            assert_eq!(balance, expected_balance);
        }
        sleep(Duration::from_millis(500));
    });
}

pub fn check_ready(rpc_client: &RpcClient) {
    while rpc_client
        .get_slot_with_commitment(CommitmentConfig::processed())
        .unwrap()
        < 5
    {
        sleep(Duration::from_millis(DEFAULT_MS_PER_SLOT));
    }
}
30.033333
98
0.628191
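A minimal sketch of how a test would drive the two helpers above, assuming they are in scope (e.g. the same module); the endpoint URL and expected balance are made-up values, and `RpcClient::new` taking a URL string mirrors the upstream Solana client API:

use panoptis_client::rpc_client::RpcClient;
use solana_sdk::pubkey::Pubkey;

fn main() {
    // Hypothetical local test-validator endpoint.
    let client = RpcClient::new("http://127.0.0.1:8899".to_string());
    let account = Pubkey::new_unique();

    // Block until the cluster has produced a few slots, then poll the
    // account balance until it matches (five tries, 500 ms apart,
    // asserting on the final try).
    check_ready(&client);
    check_recent_balance(42, &client, &account);
}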
feac53ee61a44e399d2a323c49ee1a300eceaaa5
6,893
use std::collections::HashMap;

use rustpython_bytecode::bytecode::{CodeObject, Constant, Instruction, NameScope};
use rustpython_jit::{CompiledCode, JitType};

#[derive(Debug, Clone)]
pub struct Function {
    code: Box<CodeObject>,
    name: String,
    annotations: HashMap<String, StackValue>,
}

impl Function {
    pub fn compile(self) -> CompiledCode {
        let mut arg_types = Vec::new();
        for arg in self.code.arg_names.iter() {
            let arg_type = match self.annotations.get(arg) {
                Some(StackValue::String(annotation)) => match annotation.as_str() {
                    "int" => JitType::Int,
                    "float" => JitType::Float,
                    "bool" => JitType::Bool,
                    _ => panic!("Unrecognised jit type"),
                },
                _ => panic!("Arguments must be annotated"),
            };
            arg_types.push(arg_type);
        }

        rustpython_jit::compile(&self.code, &arg_types).expect("Compile failure")
    }
}

#[derive(Debug, Clone)]
enum StackValue {
    String(String),
    None,
    Map(HashMap<String, StackValue>),
    Code(Box<CodeObject>),
    Function(Function),
}

impl From<Constant> for StackValue {
    fn from(value: Constant) -> Self {
        match value {
            Constant::String { value } => StackValue::String(value),
            Constant::None => StackValue::None,
            Constant::Code { code } => StackValue::Code(code),
            c => unimplemented!("constant {:?} isn't yet supported in py_function!", c),
        }
    }
}

pub struct StackMachine {
    stack: Vec<StackValue>,
    locals: HashMap<String, StackValue>,
}

impl StackMachine {
    pub fn new() -> StackMachine {
        StackMachine {
            stack: Vec::new(),
            locals: HashMap::new(),
        }
    }

    pub fn run(&mut self, code: CodeObject) {
        for instruction in code.instructions {
            if self.process_instruction(instruction) {
                break;
            }
        }
    }

    fn process_instruction(&mut self, instruction: Instruction) -> bool {
        match instruction {
            Instruction::LoadConst { value } => self.stack.push(value.into()),
            Instruction::LoadName {
                name,
                scope: NameScope::Free,
            } => self.stack.push(StackValue::String(name)),
            Instruction::StoreName { name, .. } => {
                self.locals.insert(name, self.stack.pop().unwrap());
            }
            Instruction::StoreAttr { .. } => {
                // Do nothing except throw away the stack values
                self.stack.pop().unwrap();
                self.stack.pop().unwrap();
            }
            Instruction::BuildMap { size, .. } => {
                let mut map = HashMap::new();
                for _ in 0..size {
                    let value = self.stack.pop().unwrap();
                    let name = if let Some(StackValue::String(name)) = self.stack.pop() {
                        name
                    } else {
                        unimplemented!("no string keys isn't yet supported in py_function!")
                    };
                    map.insert(name, value);
                }
                self.stack.push(StackValue::Map(map));
            }
            Instruction::MakeFunction => {
                let name = if let Some(StackValue::String(name)) = self.stack.pop() {
                    name
                } else {
                    panic!("Expected function name")
                };
                let code = if let Some(StackValue::Code(code)) = self.stack.pop() {
                    code
                } else {
                    panic!("Expected function code")
                };
                let annotations = if let Some(StackValue::Map(map)) = self.stack.pop() {
                    map
                } else {
                    panic!("Expected function annotations")
                };
                self.stack.push(StackValue::Function(Function {
                    name,
                    code,
                    annotations,
                }));
            }
            Instruction::Duplicate => {
                let value = self.stack.last().unwrap().clone();
                self.stack.push(value);
            }
            Instruction::Rotate { amount } => {
                let mut values = Vec::new();
                // Pop all values from stack:
                values.extend(self.stack.drain(self.stack.len() - amount..));
                // Push top of stack back first:
                self.stack.push(values.pop().unwrap());
                // Push other values back in order:
                self.stack.extend(values);
            }
            Instruction::ReturnValue => return true,
            _ => unimplemented!(
                "instruction {:?} isn't yet supported in py_function!",
                instruction
            ),
        }
        return false;
    }

    pub fn get_function(&self, name: &str) -> Function {
        if let Some(StackValue::Function(function)) = self.locals.get(name) {
            function.clone()
        } else {
            panic!("There was no function named {}", name)
        }
    }
}

macro_rules! jit_function {
    ($func_name:ident => $($t:tt)*) => {
        {
            let code = rustpython_derive::py_compile!(
                crate_name = "rustpython_bytecode",
                source = $($t)*
            );
            let mut machine = $crate::common::StackMachine::new();
            machine.run(code);
            machine.get_function(stringify!($func_name)).compile()
        }
    };
    ($func_name:ident($($arg_name:ident:$arg_type:ty),*) -> $ret_type:ty => $($t:tt)*) => {
        {
            use std::convert::TryInto;

            let jit_code = jit_function!($func_name => $($t)*);
            move |$($arg_name:$arg_type),*| -> Result<$ret_type, rustpython_jit::JitArgumentError> {
                jit_code
                    .invoke(&[$($arg_name.into()),*])
                    .map(|ret| match ret {
                        Some(ret) => ret.try_into().expect("jit function returned unexpected type"),
                        None => panic!("jit function unexpectedly returned None")
                    })
            }
        }
    };
    ($func_name:ident($($arg_name:ident:$arg_type:ty),*) => $($t:tt)*) => {
        {
            let jit_code = jit_function!($func_name => $($t)*);
            move |$($arg_name:$arg_type),*| -> Result<(), rustpython_jit::JitArgumentError> {
                jit_code
                    .invoke(&[$($arg_name.into()),*])
                    .map(|ret| match ret {
                        Some(ret) => panic!("jit function unexpectedly returned a value {:?}", ret),
                        None => ()
                    })
            }
        }
    };
}
34.293532
100
0.480633
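A minimal sketch of how a test would invoke the `jit_function!` macro defined above; the Python snippet and values are illustrative, and the exact shape accepted by `py_compile!`'s `source` argument is an assumption:

#[test]
fn jit_add() {
    // Second macro arm: compile the annotated Python function, then get a
    // typed closure that invokes the compiled code.
    let add = jit_function! { add(a: i64, b: i64) -> i64 => r#"
def add(a: int, b: int):
    return a + b
"# };

    assert_eq!(add(20, 22), Ok(42));
}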
ffcfd62cbb3be48c6e81f62c6d0ac008487d3a6c
460
// check-fail
// known-bug

// This should pass, but unnormalized input args aren't treated as implied.
#![feature(generic_associated_types)]

trait MyTrait {
    type Assoc<'a, 'b>
    where
        'b: 'a;

    fn do_sth(arg: Self::Assoc<'_, '_>);
}

struct Foo;

impl MyTrait for Foo {
    type Assoc<'a, 'b> where 'b: 'a = u32;

    fn do_sth(_: u32) {}
    // fn do_sth(_: Self::Assoc<'static, 'static>) {}
    // fn do_sth(_: Self::Assoc<'_, '_>) {}
}

fn main() {}
19.166667
75
0.595652
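The known-bug test above concerns implied bounds: the `'b: 'a` requirement ought to follow from the (normalized) argument type itself. The analogous implied bound for nested references already works on stable Rust, as this minimal sketch shows (names are illustrative):

// `&'a &'b i32` is only well-formed if 'b: 'a, so the compiler treats
// 'b: 'a as an implied bound inside the function: no explicit
// `where 'b: 'a` is needed to return the shorter-lived reference.
fn flatten<'a, 'b>(nested: &'a &'b i32) -> &'a i32 {
    *nested
}

fn main() {
    let x = 5;
    let r = &x;
    assert_eq!(*flatten(&r), 5);
}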
fe3c286adcb0a0039745933e2dad6690316cc7f9
64,019
#![doc = "generated by AutoRust 0.1.0"] #![allow(unused_mut)] #![allow(unused_variables)] #![allow(unused_imports)] use crate::models::*; use snafu::{ResultExt, Snafu}; pub mod operations { use crate::models::*; use snafu::{ResultExt, Snafu}; pub async fn list( operation_config: &crate::OperationConfig, ) -> std::result::Result<SaasAppOperationsResponseWithContinuation, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!("{}/providers/Microsoft.SaaS/operations", operation_config.base_path(),); let mut url = url::Url::parse(url_str).context(list::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(list::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(list::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(list::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: SaasAppOperationsResponseWithContinuation = serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?; list::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod list { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } } pub mod saa_s_app_moderneligibility { use crate::models::*; use snafu::{ResultExt, Snafu}; pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, ) -> std::result::Result<SaasRpModernEligibility, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.SaaS/checkModernEligibility", operation_config.base_path(), subscription_id ); let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(get::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = 
req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(get::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: SaasRpModernEligibility = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?; get::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod get { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } } pub mod applications { use crate::models::*; use snafu::{ResultExt, Snafu}; pub async fn list( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, ) -> std::result::Result<SaasAppResponseWithContinuation, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.SaaS/applications", operation_config.base_path(), subscription_id, resource_group_name ); let mut url = url::Url::parse(url_str).context(list::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(list::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(list::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(list::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: SaasAppResponseWithContinuation = serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?; list::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod list { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: 
bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } } pub mod saa_s { use crate::models::*; use snafu::{ResultExt, Snafu}; pub async fn get_resource( operation_config: &crate::OperationConfig, resource_id: &str, ) -> std::result::Result<SaasResource, get_resource::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/providers/Microsoft.SaaS/saasresources/{}", operation_config.base_path(), resource_id ); let mut url = url::Url::parse(url_str).context(get_resource::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(get_resource::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(get_resource::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(get_resource::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: SaasResource = serde_json::from_slice(rsp_body).context(get_resource::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).context(get_resource::DeserializeError { body: rsp_body.clone() })?; get_resource::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod get_resource { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn update_resource( operation_config: &crate::OperationConfig, resource_id: &str, parameters: &SaasResourceCreation, ) -> std::result::Result<update_resource::Response, update_resource::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/providers/Microsoft.SaaS/saasresources/{}", operation_config.base_path(), resource_id ); let mut url = url::Url::parse(url_str).context(update_resource::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(update_resource::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = 
req_builder.body(req_body).context(update_resource::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .context(update_resource::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: SaasResource = serde_json::from_slice(rsp_body).context(update_resource::DeserializeError { body: rsp_body.clone() })?; Ok(update_resource::Response::Ok200(rsp_value)) } http::StatusCode::ACCEPTED => Ok(update_resource::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(update_resource::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).context(update_resource::DeserializeError { body: rsp_body.clone() })?; update_resource::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod update_resource { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(SaasResource), Accepted202, NoContent204, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn delete( operation_config: &crate::OperationConfig, resource_id: &str, parameters: &DeleteOptions, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/providers/Microsoft.SaaS/saasresources/{}", operation_config.base_path(), resource_id ); let mut url = url::Url::parse(url_str).context(delete::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(delete::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(delete::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(delete::ExecuteRequestError)?; match rsp.status() { http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), http::StatusCode::OK => Ok(delete::Response::Ok200), status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).context(delete::DeserializeError { body: rsp_body.clone() })?; delete::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod delete { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug)] pub enum Response { NoContent204, Ok200, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + 
Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn create_resource( operation_config: &crate::OperationConfig, parameters: &SaasResourceCreation, ) -> std::result::Result<create_resource::Response, create_resource::Error> { let http_client = operation_config.http_client(); let url_str = &format!("{}/providers/Microsoft.SaaS/saasresources", operation_config.base_path(),); let mut url = url::Url::parse(url_str).context(create_resource::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(create_resource::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(create_resource::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .context(create_resource::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: SaasResource = serde_json::from_slice(rsp_body).context(create_resource::DeserializeError { body: rsp_body.clone() })?; Ok(create_resource::Response::Ok200(rsp_value)) } http::StatusCode::ACCEPTED => Ok(create_resource::Response::Accepted202), status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).context(create_resource::DeserializeError { body: rsp_body.clone() })?; create_resource::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod create_resource { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(SaasResource), Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } } pub mod saas_resources { use crate::models::*; use snafu::{ResultExt, Snafu}; pub async fn list(operation_config: &crate::OperationConfig) -> std::result::Result<SaasResourceResponseWithContinuation, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!("{}/providers/Microsoft.SaaS/saasresources", operation_config.base_path(),); let mut url = url::Url::parse(url_str).context(list::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(list::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", 
token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(list::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(list::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: SaasResourceResponseWithContinuation = serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?; list::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod list { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } } pub async fn saas_resource_list_access_token( operation_config: &crate::OperationConfig, resource_id: &str, ) -> std::result::Result<AccessTokenResult, saas_resource_list_access_token::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/providers/Microsoft.SaaS/saasresources/{}/listAccessToken", operation_config.base_path(), resource_id ); let mut url = url::Url::parse(url_str).context(saas_resource_list_access_token::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(saas_resource_list_access_token::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .context(saas_resource_list_access_token::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .context(saas_resource_list_access_token::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: AccessTokenResult = serde_json::from_slice(rsp_body).context(saas_resource_list_access_token::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).context(saas_resource_list_access_token::DeserializeError { body: rsp_body.clone() })?; saas_resource_list_access_token::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod saas_resource_list_access_token { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub 
enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub mod saas_subscription_level { use crate::models::*; use snafu::{ResultExt, Snafu}; pub async fn list_by_azure_subscription( operation_config: &crate::OperationConfig, subscription_id: &str, ) -> std::result::Result<SaasResourceResponseWithContinuation, list_by_azure_subscription::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.SaaS/resources", operation_config.base_path(), subscription_id ); let mut url = url::Url::parse(url_str).context(list_by_azure_subscription::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(list_by_azure_subscription::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(list_by_azure_subscription::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .context(list_by_azure_subscription::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: SaasResourceResponseWithContinuation = serde_json::from_slice(rsp_body).context(list_by_azure_subscription::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).context(list_by_azure_subscription::DeserializeError { body: rsp_body.clone() })?; list_by_azure_subscription::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod list_by_azure_subscription { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn list_by_resource_group( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, ) -> std::result::Result<SaasResourceResponseWithContinuation, list_by_resource_group::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.SaaS/resources", operation_config.base_path(), subscription_id, resource_group_name ); let mut url = 
url::Url::parse(url_str).context(list_by_resource_group::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(list_by_resource_group::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(list_by_resource_group::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .context(list_by_resource_group::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: SaasResourceResponseWithContinuation = serde_json::from_slice(rsp_body).context(list_by_resource_group::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).context(list_by_resource_group::DeserializeError { body: rsp_body.clone() })?; list_by_resource_group::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod list_by_resource_group { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, resource_name: &str, ) -> std::result::Result<SaasResource, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.SaaS/resources/{}", operation_config.base_path(), subscription_id, resource_group_name, resource_name ); let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(get::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(get::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: SaasResource = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } status_code => { let 
rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?; get::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod get { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn create_or_update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, resource_name: &str, parameters: &SaasResourceCreation, ) -> std::result::Result<create_or_update::Response, create_or_update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.SaaS/resources/{}", operation_config.base_path(), subscription_id, resource_group_name, resource_name ); let mut url = url::Url::parse(url_str).context(create_or_update::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(create_or_update::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(create_or_update::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .context(create_or_update::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: SaasResource = serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?; Ok(create_or_update::Response::Ok200(rsp_value)) } http::StatusCode::ACCEPTED => Ok(create_or_update::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(create_or_update::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?; create_or_update::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod create_or_update { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(SaasResource), Accepted202, NoContent204, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: 
azure_core::errors::AzureError, }, } } pub async fn update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, resource_name: &str, parameters: &SaasResourceCreation, ) -> std::result::Result<update::Response, update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.SaaS/resources/{}", operation_config.base_path(), subscription_id, resource_group_name, resource_name ); let mut url = url::Url::parse(url_str).context(update::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(update::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(update::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(update::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: SaasResource = serde_json::from_slice(rsp_body).context(update::DeserializeError { body: rsp_body.clone() })?; Ok(update::Response::Ok200(rsp_value)) } http::StatusCode::ACCEPTED => Ok(update::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(update::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).context(update::DeserializeError { body: rsp_body.clone() })?; update::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod update { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(SaasResource), Accepted202, NoContent204, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, resource_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.SaaS/resources/{}", operation_config.base_path(), subscription_id, resource_group_name, resource_name ); let mut url = url::Url::parse(url_str).context(delete::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(delete::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, 
format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(delete::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(delete::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).context(delete::DeserializeError { body: rsp_body.clone() })?; delete::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod delete { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn update_to_unsubscribed( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, resource_name: &str, parameters: &DeleteOptions, ) -> std::result::Result<update_to_unsubscribed::Response, update_to_unsubscribed::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.SaaS/resources/{}/unsubscribe", operation_config.base_path(), subscription_id, resource_group_name, resource_name ); let mut url = url::Url::parse(url_str).context(update_to_unsubscribed::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(update_to_unsubscribed::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(update_to_unsubscribed::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .context(update_to_unsubscribed::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(update_to_unsubscribed::Response::Ok200), http::StatusCode::ACCEPTED => Ok(update_to_unsubscribed::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(update_to_unsubscribed::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).context(update_to_unsubscribed::DeserializeError { body: rsp_body.clone() })?; update_to_unsubscribed::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod update_to_unsubscribed { use crate::{models, 
models::*}; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn list_access_token( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, resource_name: &str, ) -> std::result::Result<AccessTokenResult, list_access_token::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.SaaS/resources/{}/listAccessToken", operation_config.base_path(), subscription_id, resource_group_name, resource_name ); let mut url = url::Url::parse(url_str).context(list_access_token::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(list_access_token::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(list_access_token::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .context(list_access_token::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: AccessTokenResult = serde_json::from_slice(rsp_body).context(list_access_token::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).context(list_access_token::DeserializeError { body: rsp_body.clone() })?; list_access_token::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod list_access_token { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn validate_move_resources( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, move_resource_parameter: &MoveResource, ) -> std::result::Result<(), validate_move_resources::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/validateMoveResources", 
operation_config.base_path(), subscription_id, resource_group_name ); let mut url = url::Url::parse(url_str).context(validate_move_resources::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(validate_move_resources::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(validate_move_resources::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .context(validate_move_resources::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(()), status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).context(validate_move_resources::DeserializeError { body: rsp_body.clone() })?; validate_move_resources::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod validate_move_resources { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn move_resources( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, move_resource_parameter: &MoveResource, ) -> std::result::Result<move_resources::Response, move_resources::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/moveResources", operation_config.base_path(), subscription_id, resource_group_name ); let mut url = url::Url::parse(url_str).context(move_resources::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(move_resources::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(move_resources::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .context(move_resources::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(move_resources::Response::Ok200), http::StatusCode::ACCEPTED => Ok(move_resources::Response::Accepted202), status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = 
serde_json::from_slice(rsp_body).context(move_resources::DeserializeError { body: rsp_body.clone() })?; move_resources::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod move_resources { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } }
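// One caveat worth flagging for the operations above: `update`,
// `update_to_unsubscribed`, `validate_move_resources`, and `move_resources`
// all accept a `parameters` argument but build their requests from
// `azure_core::EMPTY_BODY`, so the payload is never sent and the
// `SerializeError` variant declared in each error enum is never constructed.
// A hedged sketch of the body construction these functions appear to be
// missing (shown for `update`; it mirrors the surrounding snafu style but is
// an illustration, not the canonical generated code):
//
//     let req_body = bytes::Bytes::from(
//         serde_json::to_vec(parameters)
//             .map_err(|e| Box::new(e) as Box<dyn std::error::Error + Sync + Send>)
//             .context(update::SerializeError)?,
//     );
//     req_builder = req_builder.header(http::header::CONTENT_TYPE, "application/json");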
41.436246
140
0.556835
fca9d69ed796da7f03211ea9fc0e6e080033c404
45,601
use super::Array; use crate::builtins::Number; use crate::{forward, Context, Value}; #[test] fn is_array() { let mut context = Context::new(); let init = r#" var empty = []; var new_arr = new Array(); var many = ["a", "b", "c"]; "#; context.eval(init).unwrap(); assert_eq!( context.eval("Array.isArray(empty)").unwrap(), Value::Boolean(true) ); assert_eq!( context.eval("Array.isArray(new_arr)").unwrap(), Value::Boolean(true) ); assert_eq!( context.eval("Array.isArray(many)").unwrap(), Value::Boolean(true) ); assert_eq!( context.eval("Array.isArray([1, 2, 3])").unwrap(), Value::Boolean(true) ); assert_eq!( context.eval("Array.isArray([])").unwrap(), Value::Boolean(true) ); assert_eq!( context.eval("Array.isArray({})").unwrap(), Value::Boolean(false) ); // assert_eq!(context.eval("Array.isArray(new Array)"), "true"); assert_eq!( context.eval("Array.isArray()").unwrap(), Value::Boolean(false) ); assert_eq!( context .eval("Array.isArray({ constructor: Array })") .unwrap(), Value::Boolean(false) ); assert_eq!( context .eval("Array.isArray({ push: Array.prototype.push, concat: Array.prototype.concat })") .unwrap(), Value::Boolean(false) ); assert_eq!( context.eval("Array.isArray(17)").unwrap(), Value::Boolean(false) ); assert_eq!( context .eval("Array.isArray({ __proto__: Array.prototype })") .unwrap(), Value::Boolean(false) ); assert_eq!( context.eval("Array.isArray({ length: 0 })").unwrap(), Value::Boolean(false) ); } #[test] fn of() { let mut context = Context::new(); assert_eq!( context .eval("Array.of(1, 2, 3)") .unwrap() .to_string(&mut context) .unwrap(), context .eval("[1, 2, 3]") .unwrap() .to_string(&mut context) .unwrap() ); assert_eq!( context .eval("Array.of(1, 'a', [], undefined, null)") .unwrap() .to_string(&mut context) .unwrap(), context .eval("[1, 'a', [], undefined, null]") .unwrap() .to_string(&mut context) .unwrap() ); assert_eq!( context .eval("Array.of()") .unwrap() .to_string(&mut context) .unwrap(), context.eval("[]").unwrap().to_string(&mut context).unwrap() ); context .eval(r#"let a = Array.of.call(Date, "a", undefined, 3);"#) .unwrap(); assert_eq!( context.eval("a instanceof Date").unwrap(), Value::from(true) ); assert_eq!(context.eval("a[0]").unwrap(), Value::from("a")); assert_eq!(context.eval("a[1]").unwrap(), Value::undefined()); assert_eq!(context.eval("a[2]").unwrap(), Value::from(3)); assert_eq!(context.eval("a.length").unwrap(), Value::from(3)); } #[ignore] fn concat() { //TODO: array display formatter let mut context = Context::new(); let init = r#" var empty = new Array(); var one = new Array(1); "#; context.eval(init).unwrap(); // Empty ++ Empty let ee = context .eval("empty.concat(empty)") .unwrap() .to_string(&mut context) .unwrap(); assert_eq!(ee, "[]"); // Empty ++ NonEmpty let en = context .eval("empty.concat(one)") .unwrap() .to_string(&mut context) .unwrap(); assert_eq!(en, "[a]"); // NonEmpty ++ Empty let ne = context .eval("one.concat(empty)") .unwrap() .to_string(&mut context) .unwrap(); assert_eq!(ne, "a.b.c"); // NonEmpty ++ NonEmpty let nn = context .eval("one.concat(one)") .unwrap() .to_string(&mut context) .unwrap(); assert_eq!(nn, "a.b.c"); } #[test] fn join() { let mut context = Context::new(); let init = r#" var empty = [ ]; var one = ["a"]; var many = ["a", "b", "c"]; "#; eprintln!("{}", forward(&mut context, init)); // Empty let empty = forward(&mut context, "empty.join('.')"); assert_eq!(empty, String::from("\"\"")); // One let one = forward(&mut context, "one.join('.')"); assert_eq!(one, String::from("\"a\"")); // Many let many = 
forward(&mut context, "many.join('.')"); assert_eq!(many, String::from("\"a.b.c\"")); } #[test] fn to_string() { let mut context = Context::new(); let init = r#" var empty = [ ]; var one = ["a"]; var many = ["a", "b", "c"]; "#; eprintln!("{}", forward(&mut context, init)); // Empty let empty = forward(&mut context, "empty.toString()"); assert_eq!(empty, String::from("\"\"")); // One let one = forward(&mut context, "one.toString()"); assert_eq!(one, String::from("\"a\"")); // Many let many = forward(&mut context, "many.toString()"); assert_eq!(many, String::from("\"a,b,c\"")); } #[test] fn every() { let mut context = Context::new(); // taken from https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/every let init = r#" var empty = []; var array = [11, 23, 45]; function callback(element) { return element > 10; } function callback2(element) { return element < 10; } var appendArray = [1,2,3,4]; function appendingCallback(elem,index,arr) { arr.push('new'); return elem !== "new"; } var delArray = [1,2,3,4]; function deletingCallback(elem,index,arr) { arr.pop() return elem < 3; } "#; eprintln!("{}", forward(&mut context, init)); let result = forward(&mut context, "array.every(callback);"); assert_eq!(result, "true"); let result = forward(&mut context, "empty.every(callback);"); assert_eq!(result, "true"); let result = forward(&mut context, "array.every(callback2);"); assert_eq!(result, "false"); let result = forward(&mut context, "appendArray.every(appendingCallback);"); assert_eq!(result, "true"); let result = forward(&mut context, "delArray.every(deletingCallback);"); assert_eq!(result, "true"); } #[test] fn find() { let mut context = Context::new(); let init = r#" function comp(a) { return a == "a"; } var many = ["a", "b", "c"]; "#; eprintln!("{}", forward(&mut context, init)); let found = forward(&mut context, "many.find(comp)"); assert_eq!(found, String::from("\"a\"")); } #[test] fn find_index() { let mut context = Context::new(); let code = r#" function comp(item) { return item == 2; } var many = [1, 2, 3]; var empty = []; var missing = [4, 5, 6]; "#; forward(&mut context, code); let many = forward(&mut context, "many.findIndex(comp)"); assert_eq!(many, String::from("1")); let empty = forward(&mut context, "empty.findIndex(comp)"); assert_eq!(empty, String::from("-1")); let missing = forward(&mut context, "missing.findIndex(comp)"); assert_eq!(missing, String::from("-1")); } #[test] fn flat() { let mut context = Context::new(); let code = r#" var depth1 = ['a', ['b', 'c']]; var flat_depth1 = depth1.flat(); var depth2 = ['a', ['b', ['c'], 'd']]; var flat_depth2 = depth2.flat(2); "#; forward(&mut context, code); assert_eq!(forward(&mut context, "flat_depth1[0]"), "\"a\""); assert_eq!(forward(&mut context, "flat_depth1[1]"), "\"b\""); assert_eq!(forward(&mut context, "flat_depth1[2]"), "\"c\""); assert_eq!(forward(&mut context, "flat_depth1.length"), "3"); assert_eq!(forward(&mut context, "flat_depth2[0]"), "\"a\""); assert_eq!(forward(&mut context, "flat_depth2[1]"), "\"b\""); assert_eq!(forward(&mut context, "flat_depth2[2]"), "\"c\""); assert_eq!(forward(&mut context, "flat_depth2[3]"), "\"d\""); assert_eq!(forward(&mut context, "flat_depth2.length"), "4"); } #[test] fn flat_empty() { let mut context = Context::new(); let code = r#" var empty = [[]]; var flat_empty = empty.flat(); "#; forward(&mut context, code); assert_eq!(forward(&mut context, "flat_empty.length"), "0"); } #[test] fn flat_infinity() { let mut context = Context::new(); let code = r#" var 
arr = [[[[[['a']]]]]]; var flat_arr = arr.flat(Infinity) "#; forward(&mut context, code); assert_eq!(forward(&mut context, "flat_arr[0]"), "\"a\""); assert_eq!(forward(&mut context, "flat_arr.length"), "1"); } #[test] fn flat_map() { let mut context = Context::new(); let code = r#" var double = [1, 2, 3]; var double_flatmap = double.flatMap(i => [i * 2]); var sentence = ["it's Sunny", "in Cali"]; var flat_split_sentence = sentence.flatMap(x => x.split(" ")); "#; forward(&mut context, code); assert_eq!(forward(&mut context, "double_flatmap[0]"), "2"); assert_eq!(forward(&mut context, "double_flatmap[1]"), "4"); assert_eq!(forward(&mut context, "double_flatmap[2]"), "6"); assert_eq!(forward(&mut context, "double_flatmap.length"), "3"); assert_eq!(forward(&mut context, "flat_split_sentence[0]"), "\"it's\""); assert_eq!(forward(&mut context, "flat_split_sentence[1]"), "\"Sunny\""); assert_eq!(forward(&mut context, "flat_split_sentence[2]"), "\"in\""); assert_eq!(forward(&mut context, "flat_split_sentence[3]"), "\"Cali\""); assert_eq!(forward(&mut context, "flat_split_sentence.length"), "4"); } #[test] fn flat_map_with_hole() { let mut context = Context::new(); let code = r#" var arr = [0, 1, 2]; delete arr[1]; var arr_flattened = arr.flatMap(i => [i * 2]); "#; forward(&mut context, code); assert_eq!(forward(&mut context, "arr_flattened[0]"), "0"); assert_eq!(forward(&mut context, "arr_flattened[1]"), "4"); assert_eq!(forward(&mut context, "arr_flattened.length"), "2"); } #[test] fn flat_map_not_callable() { let mut context = Context::new(); let code = r#" try { var array = [1,2,3]; array.flatMap("not a function"); } catch (err) { err.name === "TypeError" } "#; assert_eq!(forward(&mut context, code), "true"); } #[test] fn push() { let mut context = Context::new(); let init = r#" var arr = [1, 2]; "#; eprintln!("{}", forward(&mut context, init)); assert_eq!(forward(&mut context, "arr.push()"), "2"); assert_eq!(forward(&mut context, "arr.push(3, 4)"), "4"); assert_eq!(forward(&mut context, "arr[2]"), "3"); assert_eq!(forward(&mut context, "arr[3]"), "4"); } #[test] fn pop() { let mut context = Context::new(); let init = r#" var empty = [ ]; var one = [1]; var many = [1, 2, 3, 4]; "#; eprintln!("{}", forward(&mut context, init)); assert_eq!( forward(&mut context, "empty.pop()"), String::from("undefined") ); assert_eq!(forward(&mut context, "one.pop()"), "1"); assert_eq!(forward(&mut context, "one.length"), "0"); assert_eq!(forward(&mut context, "many.pop()"), "4"); assert_eq!(forward(&mut context, "many[0]"), "1"); assert_eq!(forward(&mut context, "many.length"), "3"); } #[test] fn shift() { let mut context = Context::new(); let init = r#" var empty = [ ]; var one = [1]; var many = [1, 2, 3, 4]; "#; eprintln!("{}", forward(&mut context, init)); assert_eq!( forward(&mut context, "empty.shift()"), String::from("undefined") ); assert_eq!(forward(&mut context, "one.shift()"), "1"); assert_eq!(forward(&mut context, "one.length"), "0"); assert_eq!(forward(&mut context, "many.shift()"), "1"); assert_eq!(forward(&mut context, "many[0]"), "2"); assert_eq!(forward(&mut context, "many.length"), "3"); } #[test] fn unshift() { let mut context = Context::new(); let init = r#" var arr = [3, 4]; "#; eprintln!("{}", forward(&mut context, init)); assert_eq!(forward(&mut context, "arr.unshift()"), "2"); assert_eq!(forward(&mut context, "arr.unshift(1, 2)"), "4"); assert_eq!(forward(&mut context, "arr[0]"), "1"); assert_eq!(forward(&mut context, "arr[1]"), "2"); } #[test] fn reverse() { let mut context = 
Context::new(); let init = r#" var arr = [1, 2]; var reversed = arr.reverse(); "#; eprintln!("{}", forward(&mut context, init)); assert_eq!(forward(&mut context, "reversed[0]"), "2"); assert_eq!(forward(&mut context, "reversed[1]"), "1"); assert_eq!(forward(&mut context, "arr[0]"), "2"); assert_eq!(forward(&mut context, "arr[1]"), "1"); } #[test] fn index_of() { let mut context = Context::new(); let init = r#" var empty = [ ]; var one = ["a"]; var many = ["a", "b", "c"]; var duplicates = ["a", "b", "c", "a", "b"]; "#; eprintln!("{}", forward(&mut context, init)); // Empty let empty = forward(&mut context, "empty.indexOf('a')"); assert_eq!(empty, String::from("-1")); // One let one = forward(&mut context, "one.indexOf('a')"); assert_eq!(one, String::from("0")); // Missing from one let missing_from_one = forward(&mut context, "one.indexOf('b')"); assert_eq!(missing_from_one, String::from("-1")); // First in many let first_in_many = forward(&mut context, "many.indexOf('a')"); assert_eq!(first_in_many, String::from("0")); // Second in many let second_in_many = forward(&mut context, "many.indexOf('b')"); assert_eq!(second_in_many, String::from("1")); // First in duplicates let first_in_many = forward(&mut context, "duplicates.indexOf('a')"); assert_eq!(first_in_many, String::from("0")); // Second in duplicates let second_in_many = forward(&mut context, "duplicates.indexOf('b')"); assert_eq!(second_in_many, String::from("1")); // Positive fromIndex greater than array length let fromindex_greater_than_length = forward(&mut context, "one.indexOf('a', 2)"); assert_eq!(fromindex_greater_than_length, String::from("-1")); // Positive fromIndex missed match let fromindex_misses_match = forward(&mut context, "many.indexOf('a', 1)"); assert_eq!(fromindex_misses_match, String::from("-1")); // Positive fromIndex matched let fromindex_matches = forward(&mut context, "many.indexOf('b', 1)"); assert_eq!(fromindex_matches, String::from("1")); // Positive fromIndex with duplicates let first_in_many = forward(&mut context, "duplicates.indexOf('a', 1)"); assert_eq!(first_in_many, String::from("3")); // Negative fromIndex greater than array length let fromindex_greater_than_length = forward(&mut context, "one.indexOf('a', -2)"); assert_eq!(fromindex_greater_than_length, String::from("0")); // Negative fromIndex missed match let fromindex_misses_match = forward(&mut context, "many.indexOf('b', -1)"); assert_eq!(fromindex_misses_match, String::from("-1")); // Negative fromIndex matched let fromindex_matches = forward(&mut context, "many.indexOf('c', -1)"); assert_eq!(fromindex_matches, String::from("2")); // Negative fromIndex with duplicates let second_in_many = forward(&mut context, "duplicates.indexOf('b', -2)"); assert_eq!(second_in_many, String::from("4")); } #[test] fn last_index_of() { let mut context = Context::new(); let init = r#" var empty = [ ]; var one = ["a"]; var many = ["a", "b", "c"]; var duplicates = ["a", "b", "c", "a", "b"]; "#; eprintln!("{}", forward(&mut context, init)); // Empty let empty = forward(&mut context, "empty.lastIndexOf('a')"); assert_eq!(empty, String::from("-1")); // One let one = forward(&mut context, "one.lastIndexOf('a')"); assert_eq!(one, String::from("0")); // Missing from one let missing_from_one = forward(&mut context, "one.lastIndexOf('b')"); assert_eq!(missing_from_one, String::from("-1")); // First in many let first_in_many = forward(&mut context, "many.lastIndexOf('a')"); assert_eq!(first_in_many, String::from("0")); // Second in many let second_in_many = forward(&mut 
context, "many.lastIndexOf('b')"); assert_eq!(second_in_many, String::from("1")); // 4th in duplicates let first_in_many = forward(&mut context, "duplicates.lastIndexOf('a')"); assert_eq!(first_in_many, String::from("3")); // 5th in duplicates let second_in_many = forward(&mut context, "duplicates.lastIndexOf('b')"); assert_eq!(second_in_many, String::from("4")); // Positive fromIndex greater than array length let fromindex_greater_than_length = forward(&mut context, "one.lastIndexOf('a', 2)"); assert_eq!(fromindex_greater_than_length, String::from("0")); // Positive fromIndex missed match let fromindex_misses_match = forward(&mut context, "many.lastIndexOf('c', 1)"); assert_eq!(fromindex_misses_match, String::from("-1")); // Positive fromIndex matched let fromindex_matches = forward(&mut context, "many.lastIndexOf('b', 1)"); assert_eq!(fromindex_matches, String::from("1")); // Positive fromIndex with duplicates let first_in_many = forward(&mut context, "duplicates.lastIndexOf('a', 1)"); assert_eq!(first_in_many, String::from("0")); // Negative fromIndex greater than array length let fromindex_greater_than_length = forward(&mut context, "one.lastIndexOf('a', -2)"); assert_eq!(fromindex_greater_than_length, String::from("-1")); // Negative fromIndex missed match let fromindex_misses_match = forward(&mut context, "many.lastIndexOf('c', -2)"); assert_eq!(fromindex_misses_match, String::from("-1")); // Negative fromIndex matched let fromindex_matches = forward(&mut context, "many.lastIndexOf('c', -1)"); assert_eq!(fromindex_matches, String::from("2")); // Negative fromIndex with duplicates let second_in_many = forward(&mut context, "duplicates.lastIndexOf('b', -2)"); assert_eq!(second_in_many, String::from("1")); } #[test] fn fill_obj_ref() { let mut context = Context::new(); // test object reference forward(&mut context, "a = (new Array(3)).fill({});"); forward(&mut context, "a[0].hi = 'hi';"); assert_eq!(forward(&mut context, "a[0].hi"), "\"hi\""); } #[test] fn fill() { let mut context = Context::new(); forward(&mut context, "var a = [1, 2, 3];"); assert_eq!( forward(&mut context, "a.fill(4).join()"), String::from("\"4,4,4\"") ); // make sure the array is modified assert_eq!(forward(&mut context, "a.join()"), String::from("\"4,4,4\"")); forward(&mut context, "a = [1, 2, 3];"); assert_eq!( forward(&mut context, "a.fill(4, '1').join()"), String::from("\"1,4,4\"") ); forward(&mut context, "a = [1, 2, 3];"); assert_eq!( forward(&mut context, "a.fill(4, 1, 2).join()"), String::from("\"1,4,3\"") ); forward(&mut context, "a = [1, 2, 3];"); assert_eq!( forward(&mut context, "a.fill(4, 1, 1).join()"), String::from("\"1,2,3\"") ); forward(&mut context, "a = [1, 2, 3];"); assert_eq!( forward(&mut context, "a.fill(4, 3, 3).join()"), String::from("\"1,2,3\"") ); forward(&mut context, "a = [1, 2, 3];"); assert_eq!( forward(&mut context, "a.fill(4, -3, -2).join()"), String::from("\"4,2,3\"") ); forward(&mut context, "a = [1, 2, 3];"); assert_eq!( forward(&mut context, "a.fill(4, NaN, NaN).join()"), String::from("\"1,2,3\"") ); forward(&mut context, "a = [1, 2, 3];"); assert_eq!( forward(&mut context, "a.fill(4, 3, 5).join()"), String::from("\"1,2,3\"") ); forward(&mut context, "a = [1, 2, 3];"); assert_eq!( forward(&mut context, "a.fill(4, '1.2', '2.5').join()"), String::from("\"1,4,3\"") ); forward(&mut context, "a = [1, 2, 3];"); assert_eq!( forward(&mut context, "a.fill(4, 'str').join()"), String::from("\"4,4,4\"") ); forward(&mut context, "a = [1, 2, 3];"); assert_eq!( forward(&mut context, "a.fill(4, 
'str', 'str').join()"), String::from("\"1,2,3\"") ); forward(&mut context, "a = [1, 2, 3];"); assert_eq!( forward(&mut context, "a.fill(4, undefined, null).join()"), String::from("\"1,2,3\"") ); forward(&mut context, "a = [1, 2, 3];"); assert_eq!( forward(&mut context, "a.fill(4, undefined, undefined).join()"), String::from("\"4,4,4\"") ); assert_eq!( forward(&mut context, "a.fill().join()"), String::from("\"undefined,undefined,undefined\"") ); // test object reference forward(&mut context, "a = (new Array(3)).fill({});"); forward(&mut context, "a[0].hi = 'hi';"); assert_eq!(forward(&mut context, "a[0].hi"), String::from("\"hi\"")); } #[test] fn includes_value() { let mut context = Context::new(); let init = r#" var empty = [ ]; var one = ["a"]; var many = ["a", "b", "c"]; var duplicates = ["a", "b", "c", "a", "b"]; var undefined = [undefined]; "#; eprintln!("{}", forward(&mut context, init)); // Empty let empty = forward(&mut context, "empty.includes('a')"); assert_eq!(empty, String::from("false")); // One let one = forward(&mut context, "one.includes('a')"); assert_eq!(one, String::from("true")); // Missing from one let missing_from_one = forward(&mut context, "one.includes('b')"); assert_eq!(missing_from_one, String::from("false")); // In many let first_in_many = forward(&mut context, "many.includes('c')"); assert_eq!(first_in_many, String::from("true")); // Missing from many let second_in_many = forward(&mut context, "many.includes('d')"); assert_eq!(second_in_many, String::from("false")); // In duplicates let first_in_many = forward(&mut context, "duplicates.includes('a')"); assert_eq!(first_in_many, String::from("true")); // Missing from duplicates let second_in_many = forward(&mut context, "duplicates.includes('d')"); assert_eq!(second_in_many, String::from("false")); } #[test] fn map() { let mut context = Context::new(); let js = r#" var empty = []; var one = ["x"]; var many = ["x", "y", "z"]; // TODO: uncomment when `this` has been implemented // var _this = { answer: 42 }; // function callbackThatUsesThis() { // return 'The answer to life is: ' + this.answer; // } var empty_mapped = empty.map(v => v + '_'); var one_mapped = one.map(v => '_' + v); var many_mapped = many.map(v => '_' + v + '_'); "#; forward(&mut context, js); // assert the old arrays have not been modified assert_eq!(forward(&mut context, "one[0]"), String::from("\"x\"")); assert_eq!( forward(&mut context, "many[2] + many[1] + many[0]"), String::from("\"zyx\"") ); // NB: These tests need to be rewritten once `Display` has been implemented for `Array` // Empty assert_eq!( forward(&mut context, "empty_mapped.length"), String::from("0") ); // One assert_eq!( forward(&mut context, "one_mapped.length"), String::from("1") ); assert_eq!( forward(&mut context, "one_mapped[0]"), String::from("\"_x\"") ); // Many assert_eq!( forward(&mut context, "many_mapped.length"), String::from("3") ); assert_eq!( forward( &mut context, "many_mapped[0] + many_mapped[1] + many_mapped[2]" ), String::from("\"_x__y__z_\"") ); // TODO: uncomment when `this` has been implemented // One but it uses `this` inside the callback // let one_with_this = forward(&mut context, "one.map(callbackThatUsesThis, _this)[0];"); // assert_eq!(one_with_this, String::from("The answer to life is: 42")) } #[test] fn slice() { let mut context = Context::new(); let init = r#" var empty = [ ].slice(); var one = ["a"].slice(); var many1 = ["a", "b", "c", "d"].slice(1); var many2 = ["a", "b", "c", "d"].slice(2, 3); var many3 = ["a", "b", "c", "d"].slice(7); "#; 
eprintln!("{}", forward(&mut context, init)); assert_eq!(forward(&mut context, "empty.length"), "0"); assert_eq!(forward(&mut context, "one[0]"), "\"a\""); assert_eq!(forward(&mut context, "many1[0]"), "\"b\""); assert_eq!(forward(&mut context, "many1[1]"), "\"c\""); assert_eq!(forward(&mut context, "many1[2]"), "\"d\""); assert_eq!(forward(&mut context, "many1.length"), "3"); assert_eq!(forward(&mut context, "many2[0]"), "\"c\""); assert_eq!(forward(&mut context, "many2.length"), "1"); assert_eq!(forward(&mut context, "many3.length"), "0"); } #[test] fn for_each() { let mut context = Context::new(); let init = r#" var a = [2, 3, 4, 5]; var sum = 0; var indexSum = 0; var listLengthSum = 0; function callingCallback(item, index, list) { sum += item; indexSum += index; listLengthSum += list.length; } a.forEach(callingCallback); "#; eprintln!("{}", forward(&mut context, init)); assert_eq!(forward(&mut context, "sum"), "14"); assert_eq!(forward(&mut context, "indexSum"), "6"); assert_eq!(forward(&mut context, "listLengthSum"), "16"); } #[test] fn for_each_push_value() { let mut context = Context::new(); let init = r#" var a = [1, 2, 3, 4]; function callingCallback(item, index, list) { list.push(item * 2); } a.forEach(callingCallback); "#; eprintln!("{}", forward(&mut context, init)); // [ 1, 2, 3, 4, 2, 4, 6, 8 ] assert_eq!(forward(&mut context, "a.length"), "8"); assert_eq!(forward(&mut context, "a[4]"), "2"); assert_eq!(forward(&mut context, "a[5]"), "4"); assert_eq!(forward(&mut context, "a[6]"), "6"); assert_eq!(forward(&mut context, "a[7]"), "8"); } #[test] fn filter() { let mut context = Context::new(); let js = r#" var empty = []; var one = ["1"]; var many = ["1", "0", "1"]; var empty_filtered = empty.filter(v => v === "1"); var one_filtered = one.filter(v => v === "1"); var zero_filtered = one.filter(v => v === "0"); var many_one_filtered = many.filter(v => v === "1"); var many_zero_filtered = many.filter(v => v === "0"); "#; forward(&mut context, js); // assert the old arrays have not been modified assert_eq!(forward(&mut context, "one[0]"), String::from("\"1\"")); assert_eq!( forward(&mut context, "many[2] + many[1] + many[0]"), String::from("\"101\"") ); // NB: These tests need to be rewritten once `Display` has been implemented for `Array` // Empty assert_eq!( forward(&mut context, "empty_filtered.length"), String::from("0") ); // One filtered on "1" assert_eq!( forward(&mut context, "one_filtered.length"), String::from("1") ); assert_eq!( forward(&mut context, "one_filtered[0]"), String::from("\"1\"") ); // One filtered on "0" assert_eq!( forward(&mut context, "zero_filtered.length"), String::from("0") ); // Many filtered on "1" assert_eq!( forward(&mut context, "many_one_filtered.length"), String::from("2") ); assert_eq!( forward(&mut context, "many_one_filtered[0] + many_one_filtered[1]"), String::from("\"11\"") ); // Many filtered on "0" assert_eq!( forward(&mut context, "many_zero_filtered.length"), String::from("1") ); assert_eq!( forward(&mut context, "many_zero_filtered[0]"), String::from("\"0\"") ); } #[test] fn some() { let mut context = Context::new(); let init = r#" var empty = []; var array = [11, 23, 45]; function lessThan10(element) { return element > 10; } function greaterThan10(element) { return element < 10; } // Cases where callback mutates the array. 
var appendArray = [1,2,3,4]; function appendingCallback(elem,index,arr) { arr.push('new'); return elem !== "new"; } var delArray = [1,2,3,4]; function deletingCallback(elem,index,arr) { arr.pop() return elem < 3; } "#; forward(&mut context, init); let result = forward(&mut context, "array.some(lessThan10);"); assert_eq!(result, "true"); let result = forward(&mut context, "empty.some(lessThan10);"); assert_eq!(result, "false"); let result = forward(&mut context, "array.some(greaterThan10);"); assert_eq!(result, "false"); let result = forward(&mut context, "appendArray.some(appendingCallback);"); let append_array_length = forward(&mut context, "appendArray.length"); assert_eq!(append_array_length, "5"); assert_eq!(result, "true"); let result = forward(&mut context, "delArray.some(deletingCallback);"); let del_array_length = forward(&mut context, "delArray.length"); assert_eq!(del_array_length, "3"); assert_eq!(result, "true"); } #[test] fn reduce() { let mut context = Context::new(); let init = r#" var arr = [1, 2, 3, 4]; function add(acc, x) { return acc + x; } function addIdx(acc, _, idx) { return acc + idx; } function addLen(acc, _x, _idx, arr) { return acc + arr.length; } function addResize(acc, x, idx, arr) { if(idx == 0) { arr.length = 3; } return acc + x; } var delArray = [1, 2, 3, 4, 5]; delete delArray[0]; delete delArray[1]; delete delArray[3]; "#; forward(&mut context, init); // empty array let result = forward(&mut context, "[].reduce(add, 0)"); assert_eq!(result, "0"); // simple with initial value let result = forward(&mut context, "arr.reduce(add, 0)"); assert_eq!(result, "10"); // without initial value let result = forward(&mut context, "arr.reduce(add)"); assert_eq!(result, "10"); // with some items missing let result = forward(&mut context, "delArray.reduce(add, 0)"); assert_eq!(result, "8"); // with index let result = forward(&mut context, "arr.reduce(addIdx, 0)"); assert_eq!(result, "6"); // with array let result = forward(&mut context, "arr.reduce(addLen, 0)"); assert_eq!(result, "16"); // resizing the array as reduce progresses let result = forward(&mut context, "arr.reduce(addResize, 0)"); assert_eq!(result, "6"); // Empty array let result = forward( &mut context, r#" try { [].reduce((acc, x) => acc + x); } catch(e) { e.message } "#, ); assert_eq!( result, "\"Reduce was called on an empty array and with no initial value\"" ); // Array with no defined elements let result = forward( &mut context, r#" try { var arr = [0, 1]; delete arr[0]; delete arr[1]; arr.reduce((acc, x) => acc + x); } catch(e) { e.message } "#, ); assert_eq!( result, "\"Reduce was called on an empty array and with no initial value\"" ); // No callback let result = forward( &mut context, r#" try { arr.reduce(""); } catch(e) { e.message } "#, ); assert_eq!(result, "\"Reduce was called without a callback\""); } #[test] fn reduce_right() { let mut context = Context::new(); let init = r#" var arr = [1, 2, 3, 4]; function sub(acc, x) { return acc - x; } function subIdx(acc, _, idx) { return acc - idx; } function subLen(acc, _x, _idx, arr) { return acc - arr.length; } function subResize(acc, x, idx, arr) { if(idx == arr.length - 1) { arr.length = 1; } return acc - x; } function subResize0(acc, x, idx, arr) { if(idx == arr.length - 2) { arr.length = 0; } return acc - x; } var delArray = [1, 2, 3, 4, 5]; delete delArray[0]; delete delArray[1]; delete delArray[3]; "#; forward(&mut context, init); // empty array let result = forward(&mut context, "[].reduceRight(sub, 0)"); assert_eq!(result, "0"); // simple with 
initial value let result = forward(&mut context, "arr.reduceRight(sub, 0)"); assert_eq!(result, "-10"); // without initial value let result = forward(&mut context, "arr.reduceRight(sub)"); assert_eq!(result, "-2"); // with some items missing let result = forward(&mut context, "delArray.reduceRight(sub, 0)"); assert_eq!(result, "-8"); // with index let result = forward(&mut context, "arr.reduceRight(subIdx)"); assert_eq!(result, "1"); // with array let result = forward(&mut context, "arr.reduceRight(subLen)"); assert_eq!(result, "-8"); // resizing the array as reduce progresses let result = forward(&mut context, "arr.reduceRight(subResize, 0)"); assert_eq!(result, "-5"); // reset array forward(&mut context, "arr = [1, 2, 3, 4];"); // resizing the array to 0 as reduce progresses let result = forward(&mut context, "arr.reduceRight(subResize0, 0)"); assert_eq!(result, "-7"); // Empty array let result = forward( &mut context, r#" try { [].reduceRight((acc, x) => acc + x); } catch(e) { e.message } "#, ); assert_eq!( result, "\"reduceRight was called on an empty array and with no initial value\"" ); // Array with no defined elements let result = forward( &mut context, r#" try { var arr = [0, 1]; delete arr[0]; delete arr[1]; arr.reduceRight((acc, x) => acc + x); } catch(e) { e.message } "#, ); assert_eq!( result, "\"reduceRight was called on an empty array and with no initial value\"" ); // No callback let result = forward( &mut context, r#" try { arr.reduceRight(""); } catch(e) { e.message } "#, ); assert_eq!(result, "\"reduceRight was called without a callback\""); } #[test] fn call_array_constructor_with_one_argument() { let mut context = Context::new(); let init = r#" var empty = new Array(0); var five = new Array(5); var one = new Array("Hello, world!"); "#; forward(&mut context, init); // let result = forward(&mut context, "empty.length"); // assert_eq!(result, "0"); // let result = forward(&mut context, "five.length"); // assert_eq!(result, "5"); // let result = forward(&mut context, "one.length"); // assert_eq!(result, "1"); } #[test] fn array_values_simple() { let mut context = Context::new(); let init = r#" var iterator = [1, 2, 3].values(); var next = iterator.next(); "#; forward(&mut context, init); assert_eq!(forward(&mut context, "next.value"), "1"); assert_eq!(forward(&mut context, "next.done"), "false"); forward(&mut context, "next = iterator.next()"); assert_eq!(forward(&mut context, "next.value"), "2"); assert_eq!(forward(&mut context, "next.done"), "false"); forward(&mut context, "next = iterator.next()"); assert_eq!(forward(&mut context, "next.value"), "3"); assert_eq!(forward(&mut context, "next.done"), "false"); forward(&mut context, "next = iterator.next()"); assert_eq!(forward(&mut context, "next.value"), "undefined"); assert_eq!(forward(&mut context, "next.done"), "true"); } #[test] fn array_keys_simple() { let mut context = Context::new(); let init = r#" var iterator = [1, 2, 3].keys(); var next = iterator.next(); "#; forward(&mut context, init); assert_eq!(forward(&mut context, "next.value"), "0"); assert_eq!(forward(&mut context, "next.done"), "false"); forward(&mut context, "next = iterator.next()"); assert_eq!(forward(&mut context, "next.value"), "1"); assert_eq!(forward(&mut context, "next.done"), "false"); forward(&mut context, "next = iterator.next()"); assert_eq!(forward(&mut context, "next.value"), "2"); assert_eq!(forward(&mut context, "next.done"), "false"); forward(&mut context, "next = iterator.next()"); assert_eq!(forward(&mut context, "next.value"), 
"undefined"); assert_eq!(forward(&mut context, "next.done"), "true"); } #[test] fn array_entries_simple() { let mut context = Context::new(); let init = r#" var iterator = [1, 2, 3].entries(); var next = iterator.next(); "#; forward(&mut context, init); assert_eq!(forward(&mut context, "next.value"), "[ 0, 1 ]"); assert_eq!(forward(&mut context, "next.done"), "false"); forward(&mut context, "next = iterator.next()"); assert_eq!(forward(&mut context, "next.value"), "[ 1, 2 ]"); assert_eq!(forward(&mut context, "next.done"), "false"); forward(&mut context, "next = iterator.next()"); assert_eq!(forward(&mut context, "next.value"), "[ 2, 3 ]"); assert_eq!(forward(&mut context, "next.done"), "false"); forward(&mut context, "next = iterator.next()"); assert_eq!(forward(&mut context, "next.value"), "undefined"); assert_eq!(forward(&mut context, "next.done"), "true"); } #[test] fn array_values_empty() { let mut context = Context::new(); let init = r#" var iterator = [].values(); var next = iterator.next(); "#; forward(&mut context, init); assert_eq!(forward(&mut context, "next.value"), "undefined"); assert_eq!(forward(&mut context, "next.done"), "true"); } #[test] fn array_values_sparse() { let mut context = Context::new(); let init = r#" var array = Array(); array[3] = 5; var iterator = array.values(); var next = iterator.next(); "#; forward(&mut context, init); assert_eq!(forward(&mut context, "next.value"), "undefined"); assert_eq!(forward(&mut context, "next.done"), "false"); forward(&mut context, "next = iterator.next()"); assert_eq!(forward(&mut context, "next.value"), "undefined"); assert_eq!(forward(&mut context, "next.done"), "false"); forward(&mut context, "next = iterator.next()"); assert_eq!(forward(&mut context, "next.value"), "undefined"); assert_eq!(forward(&mut context, "next.done"), "false"); forward(&mut context, "next = iterator.next()"); assert_eq!(forward(&mut context, "next.value"), "5"); assert_eq!(forward(&mut context, "next.done"), "false"); forward(&mut context, "next = iterator.next()"); assert_eq!(forward(&mut context, "next.value"), "undefined"); assert_eq!(forward(&mut context, "next.done"), "true"); } #[test] fn array_symbol_iterator() { let mut context = Context::new(); let init = r#" var iterator = [1, 2, 3][Symbol.iterator](); var next = iterator.next(); "#; forward(&mut context, init); assert_eq!(forward(&mut context, "next.value"), "1"); assert_eq!(forward(&mut context, "next.done"), "false"); forward(&mut context, "next = iterator.next()"); assert_eq!(forward(&mut context, "next.value"), "2"); assert_eq!(forward(&mut context, "next.done"), "false"); forward(&mut context, "next = iterator.next()"); assert_eq!(forward(&mut context, "next.value"), "3"); assert_eq!(forward(&mut context, "next.done"), "false"); forward(&mut context, "next = iterator.next()"); assert_eq!(forward(&mut context, "next.value"), "undefined"); assert_eq!(forward(&mut context, "next.done"), "true"); } #[test] fn array_values_symbol_iterator() { let mut context = Context::new(); let init = r#" var iterator = [1, 2, 3].values(); iterator === iterator[Symbol.iterator](); "#; assert_eq!(forward(&mut context, init), "true"); } #[test] fn array_spread_arrays() { let mut context = Context::new(); let init = r#" const array1 = [2, 3]; const array2 = [1, ...array1]; array2[0] === 1 && array2[1] === 2 && array2[2] === 3; "#; assert_eq!(forward(&mut context, init), "true"); } #[test] fn array_spread_non_iterable() { let mut context = Context::new(); let init = r#" try { const array2 = [...5]; } catch 
(err) { err.name === "TypeError" && err.message === "Not an iterable" } "#; assert_eq!(forward(&mut context, init), "true"); } #[test] fn get_relative_start() { let mut context = Context::new(); assert_eq!(Array::get_relative_start(&mut context, None, 10), Ok(0)); assert_eq!( Array::get_relative_start(&mut context, Some(&Value::undefined()), 10), Ok(0) ); assert_eq!( Array::get_relative_start(&mut context, Some(&Value::from(f64::NEG_INFINITY)), 10), Ok(0) ); assert_eq!( Array::get_relative_start(&mut context, Some(&Value::from(f64::INFINITY)), 10), Ok(10) ); assert_eq!( Array::get_relative_start(&mut context, Some(&Value::from(-1)), 10), Ok(9) ); assert_eq!( Array::get_relative_start(&mut context, Some(&Value::from(1)), 10), Ok(1) ); assert_eq!( Array::get_relative_start(&mut context, Some(&Value::from(-11)), 10), Ok(0) ); assert_eq!( Array::get_relative_start(&mut context, Some(&Value::from(11)), 10), Ok(10) ); assert_eq!( Array::get_relative_start(&mut context, Some(&Value::from(f64::MIN)), 10), Ok(0) ); assert_eq!( Array::get_relative_start( &mut context, Some(&Value::from(Number::MIN_SAFE_INTEGER)), 10 ), Ok(0) ); assert_eq!( Array::get_relative_start(&mut context, Some(&Value::from(f64::MAX)), 10), Ok(10) ); // This test is relevant only on 32-bit archs (where usize == u32 thus `len` is u32) assert_eq!( Array::get_relative_start( &mut context, Some(&Value::from(Number::MAX_SAFE_INTEGER)), 10 ), Ok(10) ); } #[test] fn get_relative_end() { let mut context = Context::new(); assert_eq!(Array::get_relative_end(&mut context, None, 10), Ok(10)); assert_eq!( Array::get_relative_end(&mut context, Some(&Value::undefined()), 10), Ok(10) ); assert_eq!( Array::get_relative_end(&mut context, Some(&Value::from(f64::NEG_INFINITY)), 10), Ok(0) ); assert_eq!( Array::get_relative_end(&mut context, Some(&Value::from(f64::INFINITY)), 10), Ok(10) ); assert_eq!( Array::get_relative_end(&mut context, Some(&Value::from(-1)), 10), Ok(9) ); assert_eq!( Array::get_relative_end(&mut context, Some(&Value::from(1)), 10), Ok(1) ); assert_eq!( Array::get_relative_end(&mut context, Some(&Value::from(-11)), 10), Ok(0) ); assert_eq!( Array::get_relative_end(&mut context, Some(&Value::from(11)), 10), Ok(10) ); assert_eq!( Array::get_relative_end(&mut context, Some(&Value::from(f64::MIN)), 10), Ok(0) ); assert_eq!( Array::get_relative_end( &mut context, Some(&Value::from(Number::MIN_SAFE_INTEGER)), 10 ), Ok(0) ); assert_eq!( Array::get_relative_end(&mut context, Some(&Value::from(f64::MAX)), 10), Ok(10) ); // This test is relevant only on 32-bit archs (where usize == u32 thus `len` is u32) assert_eq!( Array::get_relative_end( &mut context, Some(&Value::from(Number::MAX_SAFE_INTEGER)), 10 ), Ok(10) ); } #[test] fn array_length_is_not_enumerable() { let context = Context::new(); let array = Array::new_array(&context); let desc = array.get_property("length").unwrap(); assert!(!desc.enumerable()); }
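// A sketch restating the pattern this suite uses throughout: build a fresh
// `Context`, evaluate setup JS, then assert on the display strings returned
// by `forward`. The test below is illustrative (it re-checks behavior
// already covered above) rather than new coverage.
#[test]
fn forward_pattern_sketch() {
    let mut context = Context::new();
    let init = r#"
        var arr = ["x", "y"];
    "#;
    forward(&mut context, init);
    // Numbers render bare; strings render with surrounding quotes.
    assert_eq!(forward(&mut context, "arr.length"), "2");
    assert_eq!(forward(&mut context, "arr[0]"), "\"x\"");
    assert_eq!(forward(&mut context, "arr.indexOf('y')"), "1");
}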
29.804575
110
0.549791
2222f882256853d2c2954e99d4ca89c99d8f7644
1,930
use std::{path::PathBuf, time::Duration};

use nuget_api::v3::{Body, NuGetClient};
use turron_command::{
    async_trait::async_trait,
    clap::{self, Clap},
    indicatif::ProgressBar,
    turron_config::TurronConfigLayer,
    TurronCommand,
};
use turron_common::{
    miette::{Context, IntoDiagnostic, Result},
    smol::{self, Timer},
    tracing,
};

#[derive(Debug, Clap, TurronConfigLayer)]
#[config_layer = "publish"]
pub struct PublishCmd {
    #[clap(about = "Specific packages to publish, if not the current path")]
    nupkgs: Vec<PathBuf>,
    #[clap(
        about = "Source to publish to",
        default_value = "https://api.nuget.org/v3/index.json",
        long
    )]
    source: String,
    #[clap(from_global)]
    verbosity: tracing::Level,
    #[clap(from_global)]
    quiet: bool,
    #[clap(from_global)]
    json: bool,
    #[clap(from_global)]
    api_key: Option<String>,
}

#[async_trait]
impl TurronCommand for PublishCmd {
    async fn execute(self) -> Result<()> {
        let spinner = if self.quiet || self.json {
            ProgressBar::hidden()
        } else {
            ProgressBar::new_spinner()
        };
        let spin_clone = spinner.clone();
        let spin_fut = smol::spawn(async move {
            while !spin_clone.is_finished() {
                spin_clone.tick();
                Timer::after(Duration::from_millis(20)).await;
            }
        });
        let client = NuGetClient::from_source(self.source.clone())
            .await?
            .with_key(self.api_key);
        let body = Body::from_file(&self.nupkgs[0])
            .await
            .into_diagnostic()
            .context("Failed to open provided nupkg")?;
        spinner.println(format!("Uploading nupkg to {}...", self.source));
        client.push(body).await?;
        spinner.println("...package upload succeeded.");
        spinner.finish();
        spin_fut.await;
        Ok(())
    }
}
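// The spinner wiring above, isolated as a reusable sketch: a background smol
// task ticks the ProgressBar every 20ms until the main task calls `finish()`,
// after which the ticker is awaited so it shuts down cleanly. `with_spinner`
// is a hypothetical helper (not part of turron); it reuses the `ProgressBar`,
// `smol`, `Timer`, and `Duration` imports already in this module.
#[allow(dead_code)]
async fn with_spinner<F, T>(spinner: ProgressBar, work: F) -> T
where
    F: std::future::Future<Output = T>,
{
    let spin_clone = spinner.clone();
    // Tick on a background task until `finish()` is observed.
    let spin_fut = smol::spawn(async move {
        while !spin_clone.is_finished() {
            spin_clone.tick();
            Timer::after(Duration::from_millis(20)).await;
        }
    });
    let result = work.await;
    spinner.finish();
    spin_fut.await;
    result
}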
26.805556
76
0.581865
182405a640dbcfb2d506607c6c6194f355c5c06e
15,089
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![allow(non_snake_case)] // Error messages for EXXXX errors. // Each message should start and end with a new line, and be wrapped to 80 characters. // In vim you can `:set tw=80` and use `gq` to wrap paragraphs. Use `:set tw=0` to disable. register_long_diagnostics! { E0001: r##" This error suggests that the expression arm corresponding to the noted pattern will never be reached as for all possible values of the expression being matched, one of the preceding patterns will match. This means that perhaps some of the preceding patterns are too general, this one is too specific or the ordering is incorrect. "##, E0002: r##" This error indicates that an empty match expression is illegal because the type it is matching on is non-empty (there exist values of this type). In safe code it is impossible to create an instance of an empty type, so empty match expressions are almost never desired. This error is typically fixed by adding one or more cases to the match expression. An example of an empty type is `enum Empty { }`. "##, E0003: r##" Not-a-Number (NaN) values cannot be compared for equality and hence can never match the input to a match expression. To match against NaN values, you should instead use the `is_nan` method in a guard, as in: x if x.is_nan() => ... "##, E0004: r##" This error indicates that the compiler cannot guarantee a matching pattern for one or more possible inputs to a match expression. Guaranteed matches are required in order to assign values to match expressions, or alternatively, determine the flow of execution. If you encounter this error you must alter your patterns so that every possible value of the input type is matched. For types with a small number of variants (like enums) you should probably cover all cases explicitly. Alternatively, the underscore `_` wildcard pattern can be added after all other patterns to match "anything else". "##, // FIXME: Remove duplication here? E0005: r##" Patterns used to bind names must be irrefutable, that is, they must guarantee that a name will be extracted in all cases. If you encounter this error you probably need to use a `match` or `if let` to deal with the possibility of failure. "##, E0006: r##" Patterns used to bind names must be irrefutable, that is, they must guarantee that a name will be extracted in all cases. If you encounter this error you probably need to use a `match` or `if let` to deal with the possibility of failure. "##, E0007: r##" This error indicates that the bindings in a match arm would require a value to be moved into more than one location, thus violating unique ownership. Code like the following is invalid as it requires the entire Option<String> to be moved into a variable called `op_string` while simultaneously requiring the inner String to be moved into a variable called `s`. ``` let x = Some("s".to_string()); match x { op_string @ Some(s) => ... None => ... } ``` See also Error 303. "##, E0008: r##" Names bound in match arms retain their type in pattern guards. 
As such, if a name is bound by move in a pattern, it should also be moved to wherever it is referenced in the pattern guard code. Doing so, however, would prevent the name from being available in the body of the match arm. Consider the following: ``` match Some("hi".to_string()) { Some(s) if s.len() == 0 => // use s. ... } ``` The variable `s` has type String, and its use in the guard is as a variable of type String. The guard code effectively executes in a separate scope from the body of the arm, so the value would be moved into this anonymous scope and therefore become unavailable in the body of the arm. Although this example seems innocuous, the problem is most clear when considering functions that take their argument by value. ``` match Some("hi".to_string()) { Some(s) if { drop(s); false } => (), Some(s) => // use s. ... } ``` The value would be dropped in the guard then become unavailable not only in the body of that arm but also in all subsequent arms! The solution is to bind by reference when using guards or refactor the entire expression, perhaps by putting the condition inside the body of the arm. "##, E0009: r##" In a pattern, all values that don't implement the `Copy` trait have to be bound the same way. The goal here is to avoid binding simultaneously by-move and by-ref. This limitation may be removed in a future version of Rust. Wrong example: ``` struct X { x: (), } let x = Some((X { x: () }, X { x: () })); match x { Some((y, ref z)) => {}, None => panic!() } ``` You have two solutions: 1. Bind the pattern's values the same way: ``` struct X { x: (), } let x = Some((X { x: () }, X { x: () })); match x { Some((ref y, ref z)) => {}, // or Some((y, z)) => {} None => panic!() } ``` 2. Implement the `Copy` trait for the X structure (however, please keep in mind that the first solution should be preferred!): ``` #[derive(Clone, Copy)] struct X { x: (), } let x = Some((X { x: () }, X { x: () })); match x { Some((y, ref z)) => {}, None => panic!() } ``` "##, E0015: r##" The only function calls allowed in static or constant expressions are enum variant constructors or struct constructors (for unit or tuple structs). This is because Rust currently does not support compile-time function execution. "##, E0018: r##" The value of static and const variables must be known at compile time. You can't cast a pointer to an integer because we can't know what value the address will take. However, pointers to other constants' addresses are allowed in constants, for example: ``` const X: u32 = 50; const Y: *const u32 = &X; ``` Therefore, casting one of these non-constant pointers to an integer results in a non-constant integer, which leads to this error. Example: ``` const X: u32 = 50; const Y: *const u32 = &X; println!("{:?}", Y); ``` "##, E0020: r##" This error indicates that an attempt was made to divide by zero (or take the remainder of a zero divisor) in a static or constant expression. "##, E0133: r##" Using unsafe functionality, such as dereferencing raw pointers or calling functions via FFI or functions marked as unsafe, is potentially dangerous and disallowed by safety checks. As such, those safety checks can be temporarily relaxed by wrapping the unsafe instructions inside an `unsafe` block. For instance: unsafe fn f() { return; } fn main() { unsafe { f(); } } See also http://doc.rust-lang.org/book/unsafe.html "##, E0152: r##" Lang items are already implemented in the standard library. Unless you are writing a free-standing application (e.g. a kernel), you do not need to provide them yourself.
You can build a free-standing crate by adding `#![no_std]` to the crate attributes: ``` #![feature(no_std)] #![no_std] ``` See also https://doc.rust-lang.org/book/no-stdlib.html "##, E0158: r##" `const` and `static` mean different things. A `const` is a compile-time constant, an alias for a literal value. This property means you can match it directly within a pattern. The `static` keyword, on the other hand, guarantees a fixed location in memory. This does not always mean that the value is constant. For example, a global mutex can be declared `static` as well. If you want to match against a `static`, consider using a guard instead: ``` static FORTY_TWO: i32 = 42; match Some(42) { Some(x) if x == FORTY_TWO => ... ... } ``` "##, E0161: r##" In Rust, you can only move a value when its size is known at compile time. To work around this restriction, consider "hiding" the value behind a reference: either `&x` or `&mut x`. Since a reference has a fixed size, this lets you move it around as usual. "##, E0162: r##" An if-let pattern attempts to match the pattern, and enters the body if the match was successful. If the match is irrefutable (when it cannot fail to match), use a regular `let`-binding instead. For instance: ``` struct Irrefutable(i32); let irr = Irrefutable(0); // This fails to compile because the match is irrefutable. if let Irrefutable(x) = irr { // This body will always be executed. foo(x); } // Try this instead: let Irrefutable(x) = irr; foo(x); ``` "##, E0165: r##" A while-let pattern attempts to match the pattern, and enters the body if the match was successful. If the match is irrefutable (when it cannot fail to match), use a regular `let`-binding inside a `loop` instead. For instance: ``` struct Irrefutable(i32); let irr = Irrefutable(0); // This fails to compile because the match is irrefutable. while let Irrefutable(x) = irr { ... } // Try this instead: loop { let Irrefutable(x) = irr; ... } ``` "##, E0170: r##" Enum variants are qualified by default. For example, given this type: ``` enum Method { GET, POST } ``` you would match it using: ``` match m { Method::GET => ... Method::POST => ... } ``` If you don't qualify the names, the code will bind new variables named "GET" and "POST" instead. This behavior is likely not what you want, so rustc warns when that happens. Qualified names are good practice, and most code works well with them. But if you prefer them unqualified, you can import the variants into scope: ``` use Method::*; enum Method { GET, POST } ``` "##, E0267: r##" This error indicates the use of a loop keyword (break or continue) inside a closure but outside of any loop. Break and continue can be used as normal inside closures as long as they are also contained within a loop. To halt the execution of a closure you should instead use a return statement. "##, E0268: r##" This error indicates the use of a loop keyword (break or continue) outside of a loop. Without a loop to break out of or continue in, no sensible action can be taken. "##, E0296: r##" This error indicates that the given recursion limit could not be parsed. Ensure that the value provided is a positive integer between quotes, like so: ``` #![recursion_limit="1000"] ``` "##, E0297: r##" Patterns used to bind names must be irrefutable. That is, they must guarantee that a name will be extracted in all cases. Instead of pattern matching the loop variable, consider using a `match` or `if let` inside the loop body. For instance: ``` // This fails because `None` is not covered. for Some(x) in xs { ...
}

// Match inside the loop instead:
for item in xs {
    match item {
        Some(x) => ...
        None => ...
    }
}

// Or use `if let`:
for item in xs {
    if let Some(x) = item {
        ...
    }
}
```
"##,

E0301: r##"
Mutable borrows are not allowed in pattern guards, because matching cannot
have side effects. Side effects could alter the matched object or the
environment on which the match depends in such a way that the match would not
be exhaustive. For instance, the following would not match any arm if mutable
borrows were allowed:

```
match Some(()) {
    None => { },
    option if option.take().is_none() => { /* impossible, option is `Some` */ },
    Some(_) => { } // When the previous match failed, the option became `None`.
}
```
"##,

E0302: r##"
Assignments are not allowed in pattern guards, because matching cannot have
side effects. Side effects could alter the matched object or the environment
on which the match depends in such a way that the match would not be
exhaustive. For instance, the following would not match any arm if
assignments were allowed:

```
match Some(()) {
    None => { },
    option if { option = None; false } => { },
    Some(_) => { } // When the previous match failed, the option became `None`.
}
```
"##,

E0303: r##"
In certain cases it is possible for sub-bindings to violate memory safety.
Updates to the borrow checker in a future version of Rust may remove this
restriction, but for now patterns must be rewritten without sub-bindings.

```
// Code like this...
match Some("hi".to_string()) {
    ref op_string_ref @ Some(ref s) => ...
    None => ...
}

// After.
match Some("hi".to_string()) {
    Some(ref s) => {
        let op_string_ref = &Some(s);
        ...
    }
    None => ...
}
```

The `op_string_ref` binding has type `&Option<&String>` in both cases.

See also https://github.com/rust-lang/rust/issues/14587
"##,

E0306: r##"
In an array literal `[x; N]`, `N` is the number of elements in the array. This
number cannot be negative.
"##,

E0307: r##"
The length of an array is part of its type. For this reason, this length must
be a compile-time constant.
"##

}

register_diagnostics!
{
    E0010,
    E0011,
    E0012,
    E0013,
    E0014,
    E0016,
    E0017,
    E0019,
    E0022,
    E0079, // enum variant: expected signed integer constant
    E0080, // enum variant: constant evaluation error
    E0109,
    E0110,
    E0134,
    E0135,
    E0136,
    E0137,
    E0138,
    E0139,
    E0261, // use of undeclared lifetime name
    E0262, // illegal lifetime parameter name
    E0263, // lifetime name declared twice in same scope
    E0264, // unknown external lang item
    E0265, // recursive constant
    E0266, // expected item
    E0269, // not all control paths return a value
    E0270, // computation may converge in a function marked as diverging
    E0271, // type mismatch resolving
    E0272, // rustc_on_unimplemented attribute refers to non-existent type parameter
    E0273, // rustc_on_unimplemented must have named format arguments
    E0274, // rustc_on_unimplemented must have a value
    E0275, // overflow evaluating requirement
    E0276, // requirement appears on impl method but not on corresponding trait method
    E0277, // trait is not implemented for type
    E0278, // requirement is not satisfied
    E0279, // requirement is not satisfied
    E0280, // requirement is not satisfied
    E0281, // type implements trait but other trait is required
    E0282, // unable to infer enough type information about
    E0283, // cannot resolve type
    E0284, // cannot resolve type
    E0285, // overflow evaluation builtin bounds
    E0298, // mismatched types between arms
    E0299, // mismatched types between arms
    E0300, // unexpanded macro
    E0304, // expected signed integer constant
    E0305, // expected constant
    E0308,
    E0309, // thing may not live long enough
    E0310, // thing may not live long enough
    E0311, // thing may not live long enough
    E0312, // lifetime of reference outlives lifetime of borrowed content
    E0313, // lifetime of borrowed pointer outlives lifetime of captured variable
    E0314, // closure outlives stack frame
    E0315, // cannot invoke closure outside of its lifetime
    E0316, // nested quantification of lifetimes
    E0370  // discriminant overflow
}

__build_diagnostic_array! { DIAGNOSTICS }
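To make the last two long diagnostics above concrete, here is a minimal sketch (our illustration, not part of the original rustc text) of an array length that satisfies the compile-time-constant requirement described by E0307:

```
// `N` is a compile-time constant, so it may appear as the length in the
// array type `[u8; N]`; a value computed at runtime could not.
const N: usize = 4;

fn main() {
    let xs = [0u8; N];
    assert_eq!(xs.len(), 4);
}
```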
28.90613
91
0.702896
8a5cd3ca7e487ceec98d392d62c6d92ce1b1e556
1,347
//! Module concerned with handling the global application lifecycle of eww.
//! Currently, this only means handling application exit by providing a global
//! `recv_exit()` function which can be awaited to receive an event in case of application termination.

use anyhow::*;
use once_cell::sync::Lazy;
use tokio::sync::broadcast;

pub static APPLICATION_EXIT_SENDER: Lazy<broadcast::Sender<()>> = Lazy::new(|| broadcast::channel(2).0);

/// Notify all listening tasks of the termination of the eww application process.
pub fn send_exit() -> Result<()> {
    (APPLICATION_EXIT_SENDER).send(()).context("Failed to send exit lifecycle event")?;
    Ok(())
}

/// Yields Ok(()) on application termination. Await on this in all long-running tasks
/// and perform any cleanup if necessary.
pub async fn recv_exit() -> Result<()> {
    (APPLICATION_EXIT_SENDER).subscribe().recv().await.context("Failed to receive lifecycle event")
}

/// Select in a loop, breaking once an application termination event (see `crate::application_lifecycle`) is received.
#[macro_export]
macro_rules! loop_select_exiting {
    ($($content:tt)*) => {
        loop {
            tokio::select! {
                Ok(()) = crate::application_lifecycle::recv_exit() => { break; }
                $($content)*
            }
        }
    };
}
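As a usage illustration of the macro above, a hedged sketch (not part of the module: it assumes a tokio runtime and that the task lives inside the eww crate, so `crate::application_lifecycle` resolves):

```
// Hypothetical long-running task: performs periodic work until another
// task calls `send_exit()`, at which point the generated loop breaks.
async fn tick_until_exit() {
    let mut interval = tokio::time::interval(std::time::Duration::from_secs(1));
    loop_select_exiting! {
        _ = interval.tick() => {
            // ... periodic work goes here ...
        }
    }
    // Reached only after an exit event was broadcast via `send_exit()`.
}
```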
36.405405
117
0.657016
283e01b3d975cdc4f9e4da90259fc21071eade0f
11,596
use crate::db; use spacetraders::client::{Client, HttpClient}; use sqlx::PgPool; use spacetraders::{client, responses, shared}; use spacetraders::responses::MyShips; use spacetraders::shared::LoanType; use spacetraders::errors::SpaceTradersClientError; use crate::ship_machines::{ShipMachine, ShipAssignment, builder::ShipMachineBuilder}; #[derive(Debug, Clone)] pub struct User { pub username: String, pub token: String, pub id: String, client: Client, pg_pool: PgPool, new_ship_assignment: ShipAssignment, pub new_ship_system: String, pub new_ship_location: Option<String>, pub ship_machines: Vec<ShipMachine>, pub loans: Vec<shared::Loan>, pub outstanding_loans: usize, pub credits: i32, } impl User { pub async fn new(http_client: HttpClient, pg_pool: PgPool, username: String, new_ship_assignment: ShipAssignment, new_ship_system: String, new_ship_location: Option<String>) -> anyhow::Result<User> { let db_user = db::get_user(pg_pool.clone(), username.clone()).await?; if let Some(user) = db_user { log::debug!("Found existing user {}", username); let client = Client::new(http_client, user.username, user.token.clone()); let info = client.get_my_info().await?; let ships = client.get_my_ships().await?; let loans = client.get_my_loans().await?; log::info!("User credits {}", info.user.credits); let mut user = User { username, token: user.token.clone(), id: user.id, client, pg_pool: pg_pool.clone(), new_ship_assignment: new_ship_assignment.clone(), new_ship_system: new_ship_system.clone(), new_ship_location: new_ship_location.clone(), ship_machines: Vec::new(), credits: info.user.credits, outstanding_loans: loans.loans.iter().filter(|f| { !f.status.contains("PAID") }).count(), loans: loans.loans, }; user.add_ship_machines_from_user_info(&ships, &new_ship_assignment); for ship in &ships.ships { db::persist_ship(pg_pool.clone(), &user.id, &new_ship_system, ship).await?; } Ok(user) } else { log::debug!("Creating new user {}", username); let claimed_user = client::claim_username(http_client.clone(), username.clone()).await?; log::info!("Claimed new user {:?}", claimed_user); let db_user = db::persist_user( pg_pool.clone(), username.clone(), claimed_user.token.clone(), &new_ship_assignment, &new_ship_system, ).await?; log::debug!("New user persisted"); let client = Client::new(http_client, username.clone(), claimed_user.token.clone()); let info = client.get_my_info().await?; let ships = client.get_my_ships().await?; let loans = client.get_my_loans().await?; log::info!("User credits {}", info.user.credits); let mut user = User { username: username.clone(), token: claimed_user.token.clone(), id: db_user.id, client, pg_pool: pg_pool.clone(), new_ship_assignment: new_ship_assignment.clone(), new_ship_system: new_ship_system.clone(), new_ship_location: new_ship_location.clone(), ship_machines: Vec::new(), credits: info.user.credits, loans: loans.loans.clone(), outstanding_loans: loans.loans.iter().filter(|f| { !f.status.contains("PAID") }).count() }; user.add_ship_machines_from_user_info(&ships, &new_ship_assignment); for ship in &ships.ships { db::persist_ship(pg_pool.clone(), &user.id, &new_ship_system, ship).await?; } Ok(user) } } fn add_ship_machines_from_user_info(&mut self, ships: &MyShips, assignment: &ShipAssignment) { self.ship_machines = ships.ships.iter().map(|ship| { self.ship_to_machine(ship, &assignment) }).collect() } fn ship_to_machine(&self, ship: &shared::Ship, assignment: &ShipAssignment) -> ShipMachine { let mut ship_machine_builder = ShipMachineBuilder::new(); 
ship_machine_builder.client(self.client.clone()) .pg_pool(self.pg_pool.clone()) .user_id(self.id.clone()) .username(self.username.clone()) .system(self.new_ship_system.clone()) .assignment(assignment.clone()) .ship(ship.clone()); if let Some(new_ship_location) = self.new_ship_location.clone() { ship_machine_builder.location(new_ship_location); } ship_machine_builder.build().expect("Unable to build ship") } pub async fn request_new_loan(&mut self, loan_type: LoanType) -> anyhow::Result<()> { let loan_response = self.client.request_new_loan(loan_type).await?; // Update our info to contain the new data from the loan response self.credits = loan_response.credits; // Keep track of loans... self.loans.push(loan_response.loan); self.outstanding_loans = self.loans.iter().filter(|f| { !f.status.contains("PAID") }).count(); Ok(()) } pub async fn purchase_ship(&mut self, fastest_ship_location: String, ship_type: String) -> anyhow::Result<()> { let purchase_ship_response = self.client.purchase_ship(fastest_ship_location, ship_type).await?; // TODO: Record new ship db::persist_ship(self.pg_pool.clone(), &self.id, &self.new_ship_system, &purchase_ship_response.ship).await?; self.credits = purchase_ship_response.credits; self.ship_machines.push(self.ship_to_machine(&purchase_ship_response.ship, &self.new_ship_assignment)); Ok(()) } pub async fn purchase_fastest_ship(&mut self) -> anyhow::Result<()> { let available_ships = self.client.get_ships_for_sale().await?; let mut fastest_ship = None; let mut fastest_ship_speed: i32 = 0; let mut fastest_ship_location = "".to_string(); let mut fastest_ship_price: i32 = 0; let ships = self.client.get_my_ships().await?; let ships_count = ships.ships.len(); let valid_locations: Vec<String> = ships.ships .into_iter() .filter(|s| s.location != None) .map(|s| s.location.unwrap()) .collect(); log::info!("{} -- Valid locations to purchase a ship from are {:?}", self.username, valid_locations.clone()); log::info!("{} -- User currently has {} ships", self.username, ships_count); log::info!("{} -- Ships available for purchase {:?}", self.username, available_ships.clone()); if ships_count > 0 && valid_locations.is_empty() { log::warn!("{} -- No docked ships found to purchase ships with. 
Will retry later", self.username); return Ok(()); } for available_ship in &available_ships.ships { for purchase_location in &available_ship.purchase_locations { if available_ship.speed > fastest_ship_speed && available_ship.restricted_goods == None && self.credits > purchase_location.price && (ships_count == 0 || valid_locations.contains(&purchase_location.location)) && purchase_location.system == self.new_ship_system { fastest_ship_speed = available_ship.speed; fastest_ship = Some(available_ship); fastest_ship_location = purchase_location.location.clone(); fastest_ship_price = purchase_location.price; } } } if let Some(ship) = fastest_ship { log::info!("Ship {} -- Buying {} for {} at location {}", self.username, ship.ship_type.clone(), fastest_ship_price, fastest_ship_location); self.purchase_ship(fastest_ship_location, ship.ship_type.clone()).await?; } else { log::warn!("Unable to find a ship for the user to purchase"); } Ok(()) } pub async fn purchase_largest_ship(&mut self) -> anyhow::Result<()> { let available_ships = self.client.get_ships_for_sale().await?; let mut largest_ship = None; let mut largest_ship_capacity: i32 = 0; let mut largest_ship_location = "".to_string(); let mut largest_ship_price: i32 = 0; let ships = self.client.get_my_ships().await?; let ships_count = ships.ships.len(); let valid_locations: Vec<String> = ships.ships .into_iter() .filter(|s| s.location != None) .map(|s| s.location.unwrap()) .collect(); log::info!("{} -- Valid locations to purchase a ship from are {:?}", self.username, valid_locations.clone()); log::info!("{} -- User currently has {} ships", self.username, ships_count); log::info!("{} -- Ships available for purchase {:?}", self.username, available_ships.clone()); if ships_count > 0 && valid_locations.is_empty() { log::warn!("{} -- No docked ships found to purchase ships with. Will retry later", self.username); return Ok(()); } for available_ship in &available_ships.ships { for purchase_location in &available_ship.purchase_locations { if available_ship.max_cargo > largest_ship_capacity && available_ship.restricted_goods == None && self.credits > purchase_location.price && (ships_count == 0 || valid_locations.contains(&purchase_location.location)) && purchase_location.system == self.new_ship_system { largest_ship_capacity = available_ship.max_cargo; largest_ship = Some(available_ship); largest_ship_location = purchase_location.location.clone(); largest_ship_price = purchase_location.price; } } } if let Some(ship) = largest_ship { log::info!("Ship {} -- Buying {} for {} at location {}", self.username, ship.ship_type.clone(), largest_ship_price, largest_ship_location); self.purchase_ship(largest_ship_location, ship.ship_type.clone()).await?; } else { log::warn!("Unable to find a ship for the user to purchase"); } Ok(()) } pub async fn get_systems(&self) -> anyhow::Result<responses::SystemsInfo> { let systems_info = self.client.get_systems_info().await?; log::debug!("Systems info: {:?}", systems_info); for system in &systems_info.systems { for location in &system.locations { db::persist_system_location(self.pg_pool.clone(), system, location).await?; } } Ok(systems_info) } pub async fn get_my_ships(&self) -> Result<responses::MyShips, SpaceTradersClientError> { self.client.get_my_ships().await } pub async fn pay_off_loan(&self, loan_id: &str) -> Result<responses::PayLoanResponse, SpaceTradersClientError> { self.client.pay_off_loan(loan_id).await } }
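One observation on the file above: `purchase_fastest_ship` and `purchase_largest_ship` are near-duplicates that differ only in the field they rank candidates by. A hedged refactoring sketch of factoring the ranking out (standalone hypothetical types, not the crate's real ones):

```
// Hypothetical reduction: rank candidates by an arbitrary key, as one way
// to fold purchase_fastest_ship/purchase_largest_ship together.
#[derive(Debug)]
struct Candidate {
    ship_type: &'static str,
    speed: i32,
    max_cargo: i32,
}

fn best_by<K: Ord>(candidates: &[Candidate], key: impl Fn(&Candidate) -> K) -> Option<&Candidate> {
    candidates.iter().max_by_key(|c| key(c))
}

fn main() {
    let ships = vec![
        Candidate { ship_type: "JW-MK-I", speed: 2, max_cargo: 50 },
        Candidate { ship_type: "GR-MK-II", speed: 1, max_cargo: 100 },
    ];

    let fastest = best_by(&ships, |c| c.speed).unwrap();
    let largest = best_by(&ships, |c| c.max_cargo).unwrap();
    println!("fastest: {}, largest: {}", fastest.ship_type, largest.ship_type);
}
```

The real methods would keep their price/location/system filters; only the ranking key needs to vary.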
41.562724
203
0.597534
2ffc0485999070eeb3dac809e5481118ab92ab30
11,621
use crate::app::App; use abstutil::Counter; use ezgui::{Color, Drawable, EventCtx, FancyColor, GeomBatch, Line, LinearGradient, Text, Widget}; use geom::{Circle, Distance, Line, Polygon, Pt2D}; use map_model::{BuildingID, BusStopID, IntersectionID, LaneID, Map, ParkingLotID, RoadID}; use std::collections::HashMap; pub struct ColorDiscrete<'a> { map: &'a Map, unzoomed: GeomBatch, zoomed: GeomBatch, // Store both, so we can build the legend in the original order later categories: Vec<(&'static str, Color)>, colors: HashMap<&'static str, Color>, } impl<'a> ColorDiscrete<'a> { pub fn new(app: &'a App, categories: Vec<(&'static str, Color)>) -> ColorDiscrete<'a> { let mut unzoomed = GeomBatch::new(); unzoomed.push( app.cs.fade_map_dark, app.primary.map.get_boundary_polygon().clone(), ); ColorDiscrete { map: &app.primary.map, unzoomed, zoomed: GeomBatch::new(), colors: categories.iter().cloned().collect(), categories, } } pub fn add_l(&mut self, l: LaneID, category: &'static str) { let color = self.colors[category]; self.unzoomed .push(color, self.map.get_parent(l).get_thick_polygon(self.map)); let lane = self.map.get_l(l); self.zoomed.push( color.alpha(0.4), lane.lane_center_pts.make_polygons(lane.width), ); } pub fn add_r(&mut self, r: RoadID, category: &'static str) { let color = self.colors[category]; self.unzoomed .push(color, self.map.get_r(r).get_thick_polygon(self.map)); self.zoomed.push( color.alpha(0.4), self.map.get_r(r).get_thick_polygon(self.map), ); } pub fn add_i(&mut self, i: IntersectionID, category: &'static str) { let color = self.colors[category]; self.unzoomed.push(color, self.map.get_i(i).polygon.clone()); self.zoomed .push(color.alpha(0.4), self.map.get_i(i).polygon.clone()); } pub fn add_b(&mut self, b: BuildingID, category: &'static str) { let color = self.colors[category]; self.unzoomed.push(color, self.map.get_b(b).polygon.clone()); self.zoomed .push(color.alpha(0.4), self.map.get_b(b).polygon.clone()); } pub fn add_bs(&mut self, bs: BusStopID, category: &'static str) { let color = self.colors[category]; let pt = self.map.get_bs(bs).sidewalk_pos.pt(self.map); self.zoomed.push( color.alpha(0.4), Circle::new(pt, Distance::meters(5.0)).to_polygon(), ); self.unzoomed .push(color, Circle::new(pt, Distance::meters(15.0)).to_polygon()); } pub fn build(self, ctx: &mut EventCtx) -> (Drawable, Drawable, Widget) { let legend = self .categories .into_iter() .map(|(name, color)| ColorLegend::row(ctx, color, name)) .collect(); ( ctx.upload(self.unzoomed), ctx.upload(self.zoomed), Widget::col(legend), ) } } pub struct ColorLegend {} impl ColorLegend { pub fn row<S: Into<String>>(ctx: &mut EventCtx, color: Color, label: S) -> Widget { let radius = 15.0; Widget::row(vec![ Widget::draw_batch( ctx, GeomBatch::from(vec![( color, Circle::new(Pt2D::new(radius, radius), Distance::meters(radius)).to_polygon(), )]), ) .centered_vert(), Text::from(Line(label)).wrap_to_pct(ctx, 35).draw(ctx), ]) } pub fn gradient<I: Into<String>>( ctx: &mut EventCtx, scale: &ColorScale, labels: Vec<I>, ) -> Widget { assert!(scale.0.len() >= 2); let width = 300.0; let n = scale.0.len(); let mut batch = GeomBatch::new(); let width_each = width / ((n - 1) as f64); batch.fancy_push( FancyColor::LinearGradient(LinearGradient { line: Line::must_new(Pt2D::new(0.0, 0.0), Pt2D::new(width, 0.0)), stops: scale .0 .iter() .enumerate() .map(|(idx, color)| ((idx as f64) / ((n - 1) as f64), *color)) .collect(), }), Polygon::union_all( (0..n - 1) .map(|i| { Polygon::rectangle(width_each, 32.0).translate((i as f64) * width_each, 
0.0) }) .collect(), ), ); // Extra wrapping to make the labels stretch against just the scale, not everything else // TODO Long labels aren't nicely lined up with the boundaries between buckets Widget::col(vec![ Widget::draw_batch(ctx, batch), Widget::custom_row( labels .into_iter() .map(|lbl| Line(lbl).small().draw(ctx)) .collect(), ) .evenly_spaced(), ]) .container() } } pub struct DivergingScale { low_color: Color, mid_color: Color, high_color: Color, min: f64, avg: f64, max: f64, ignore: Option<(f64, f64)>, } impl DivergingScale { pub fn new(low_color: Color, mid_color: Color, high_color: Color) -> DivergingScale { DivergingScale { low_color, mid_color, high_color, min: 0.0, avg: 0.5, max: 1.0, ignore: None, } } pub fn range(mut self, min: f64, max: f64) -> DivergingScale { assert!(min < max); self.min = min; self.avg = (min + max) / 2.0; self.max = max; self } pub fn ignore(mut self, from: f64, to: f64) -> DivergingScale { assert!(from < to); self.ignore = Some((from, to)); self } pub fn eval(&self, value: f64) -> Option<Color> { let value = value.max(self.min).min(self.max); if let Some((from, to)) = self.ignore { if value >= from && value <= to { return None; } } if value <= self.avg { Some( self.low_color .lerp(self.mid_color, (value - self.min) / (self.avg - self.min)), ) } else { Some( self.mid_color .lerp(self.high_color, (value - self.avg) / (self.max - self.avg)), ) } } pub fn make_legend<I: Into<String>>(self, ctx: &mut EventCtx, labels: Vec<I>) -> Widget { ColorLegend::gradient( ctx, &ColorScale(vec![self.low_color, self.mid_color, self.high_color]), labels, ) } } // TODO Bad name pub struct ColorNetwork<'a> { map: &'a Map, pub unzoomed: GeomBatch, pub zoomed: GeomBatch, } impl<'a> ColorNetwork<'a> { pub fn new(app: &'a App) -> ColorNetwork { let mut unzoomed = GeomBatch::new(); unzoomed.push( app.cs.fade_map_dark, app.primary.map.get_boundary_polygon().clone(), ); ColorNetwork { map: &app.primary.map, unzoomed, zoomed: GeomBatch::new(), } } pub fn add_l(&mut self, l: LaneID, color: Color) { self.unzoomed .push(color, self.map.get_parent(l).get_thick_polygon(self.map)); let lane = self.map.get_l(l); self.zoomed.push( color.alpha(0.4), lane.lane_center_pts.make_polygons(lane.width), ); } pub fn add_r(&mut self, r: RoadID, color: Color) { self.unzoomed .push(color, self.map.get_r(r).get_thick_polygon(self.map)); self.zoomed.push( color.alpha(0.4), self.map.get_r(r).get_thick_polygon(self.map), ); } pub fn add_i(&mut self, i: IntersectionID, color: Color) { self.unzoomed.push(color, self.map.get_i(i).polygon.clone()); self.zoomed .push(color.alpha(0.4), self.map.get_i(i).polygon.clone()); } pub fn add_b(&mut self, b: BuildingID, color: Color) { self.unzoomed.push(color, self.map.get_b(b).polygon.clone()); self.zoomed .push(color.alpha(0.4), self.map.get_b(b).polygon.clone()); } pub fn add_pl(&mut self, pl: ParkingLotID, color: Color) { self.unzoomed .push(color, self.map.get_pl(pl).polygon.clone()); self.zoomed .push(color.alpha(0.4), self.map.get_pl(pl).polygon.clone()); } pub fn ranked_roads(&mut self, counter: Counter<RoadID>, scale: &ColorScale) { let roads = counter.sorted_asc(); let len = roads.len() as f64; for (idx, list) in roads.into_iter().enumerate() { let color = scale.eval((idx as f64) / len); for r in list { self.add_r(r, color); } } } pub fn ranked_intersections(&mut self, counter: Counter<IntersectionID>, scale: &ColorScale) { let intersections = counter.sorted_asc(); let len = intersections.len() as f64; for (idx, list) in intersections.into_iter().enumerate() 
{ let color = scale.eval((idx as f64) / len); for i in list { self.add_i(i, color); } } } pub fn build(self, ctx: &mut EventCtx) -> (Drawable, Drawable) { (ctx.upload(self.unzoomed), ctx.upload(self.zoomed)) } } pub struct ColorScale(pub Vec<Color>); impl ColorScale { pub fn eval(&self, pct: f64) -> Color { let (low, pct) = self.inner_eval(pct); self.0[low].lerp(self.0[low + 1], pct) } #[allow(unused)] pub fn from_colorous(gradient: colorous::Gradient) -> ColorScale { let n = 7; ColorScale( (0..n) .map(|i| { let c = gradient.eval_rational(i, n); Color::rgb(c.r as usize, c.g as usize, c.b as usize) }) .collect(), ) } fn inner_eval(&self, pct: f64) -> (usize, f64) { assert!(pct >= 0.0 && pct <= 1.0); // What's the interval between each pair of colors? let width = 1.0 / (self.0.len() - 1) as f64; let low = (pct / width).floor() as usize; if low == self.0.len() - 1 { return (low - 1, 1.0); } (low, (pct % width) / width) } } #[cfg(test)] mod tests { #[test] fn test_scale() { use super::ColorScale; use ezgui::Color; let two = ColorScale(vec![Color::BLACK, Color::WHITE]); assert_same((0, 0.0), two.inner_eval(0.0)); assert_same((0, 0.5), two.inner_eval(0.5)); assert_same((0, 1.0), two.inner_eval(1.0)); let three = ColorScale(vec![Color::BLACK, Color::RED, Color::WHITE]); assert_same((0, 0.0), three.inner_eval(0.0)); assert_same((0, 0.4), three.inner_eval(0.2)); assert_same((1, 0.0), three.inner_eval(0.5)); assert_same((1, 0.4), three.inner_eval(0.7)); assert_same((1, 1.0), three.inner_eval(1.0)); } fn assert_same(expected: (usize, f64), actual: (usize, f64)) { assert_eq!(expected.0, actual.0); if (expected.1 - actual.1).abs() > 0.0001 { panic!("{:?} != {:?}", expected, actual); } } }
31.751366
100
0.522244
486fc4042ba404a3e93e83c2af2cb0679867fb86
4,639
/* Copyright 2021 JFrog Ltd Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ pub mod args; pub mod network; use args::parser::PyrsiaNodeArgs; use network::handlers; use pyrsia::docker::error_util::*; use pyrsia::docker::v2::routes::make_docker_routes; use pyrsia::logging::*; use pyrsia::network::client::Client; use pyrsia::network::p2p; use pyrsia::node_api::routes::make_node_routes; use clap::Parser; use futures::StreamExt; use log::{debug, info, warn}; use std::error::Error; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use warp::Filter; #[tokio::main] async fn main() -> Result<(), Box<dyn Error>> { pretty_env_logger::init(); debug!("Parse CLI arguments"); let args = PyrsiaNodeArgs::parse(); debug!("Create p2p components"); let (p2p_client, mut p2p_events, event_loop) = p2p::setup_libp2p_swarm(args.max_provided_keys)?; debug!("Start p2p event loop"); tokio::spawn(event_loop.run()); debug!("Setup HTTP server"); setup_http(&args, p2p_client.clone()); debug!("Start p2p components"); setup_p2p(p2p_client.clone(), args).await; debug!("Listen for p2p events"); loop { if let Some(event) = p2p_events.next().await { match event { // Reply with the content of the artifact on incoming requests. pyrsia::network::event_loop::PyrsiaEvent::RequestArtifact { artifact_type, artifact_hash, channel, } => { if let Err(error) = handlers::handle_request_artifact( p2p_client.clone(), &artifact_type, &artifact_hash, channel, ) .await { warn!( "This node failed to provide artifact with type {} and hash {}. Error: {:?}", artifact_type, artifact_hash, error ); } } pyrsia::network::event_loop::PyrsiaEvent::IdleMetricRequest { channel } => { if let Err(error) = handlers::handle_request_idle_metric(p2p_client.clone(), channel).await { warn!( "This node failed to provide idle metrics. Error: {:?}", error ); } } } } } } fn setup_http(args: &PyrsiaNodeArgs, p2p_client: Client) { // Get host and port from the settings. 
// Defaults to DEFAULT_HOST and DEFAULT_PORT.
debug!( "Pyrsia Docker Node will bind to host = {}, port = {}", args.host, args.port ); let address = SocketAddr::new( IpAddr::V4(args.host.parse::<Ipv4Addr>().unwrap()), args.port.parse::<u16>().unwrap(), ); debug!("Setup HTTP routing"); let docker_routes = make_docker_routes(p2p_client.clone()); let node_api_routes = make_node_routes(p2p_client); let all_routes = docker_routes.or(node_api_routes); debug!("Setup HTTP server"); let (addr, server) = warp::serve( all_routes .and(http::log_headers()) .recover(custom_recover) .with(warp::log("pyrsia_registry")), ) .bind_ephemeral(address); info!( "Pyrsia Docker Node will start running on {}:{}", addr.ip(), addr.port() ); tokio::spawn(server); } async fn setup_p2p(mut p2p_client: Client, args: PyrsiaNodeArgs) { p2p_client .listen(&args.listen_address) .await .expect("Listening should not fail"); if let Some(to_dial) = args.peer { handlers::dial_other_peer(p2p_client.clone(), &to_dial).await; } debug!("Provide local artifacts"); if let Err(error) = handlers::provide_artifacts(p2p_client.clone()).await { warn!( "An error occurred while providing local artifacts. Error: {:?}", error ); } }
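For reference, the bind-then-spawn idiom used by `setup_http` above, reduced to a standalone hedged sketch (hypothetical route; assumes the `warp` and `tokio` crates): `bind_ephemeral` returns the bound address together with a future that drives the server, and spawning that future leaves the caller free to continue into the p2p event loop.

```
use std::net::SocketAddr;

use warp::Filter;

#[tokio::main]
async fn main() {
    // Trivial route standing in for the real docker/node API routes.
    let routes = warp::path!("health").map(|| "ok");

    let address: SocketAddr = ([127, 0, 0, 1], 7878).into();

    // `bind_ephemeral` hands back the bound address immediately, plus a
    // future that drives the server; spawning it keeps this task free.
    let (addr, server) = warp::serve(routes).bind_ephemeral(address);
    println!("listening on {}", addr);

    tokio::spawn(server);

    // (A real application, like the one above, would now run its main
    // event loop; returning here would shut the runtime down.)
}
```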
31.773973
105
0.57189
c1593deadc3ce1b89d942f2e7c2f91968d873cfb
7,303
// Copyright 2018-2022 Cargill Incorporated // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Structs for building proposed services use std::convert::TryFrom; use crate::admin::messages::{self, is_valid_service_id}; use crate::error::InvalidStateError; use crate::protos::admin; /// Native representation of a service that is a part of a proposed circuit #[derive(Clone, Debug, PartialEq, Eq)] pub struct ProposedService { service_id: String, service_type: String, node_id: String, arguments: Vec<(String, String)>, } impl ProposedService { /// Returns the ID of the proposed service pub fn service_id(&self) -> &str { &self.service_id } /// Returns the service type of the proposed service pub fn service_type(&self) -> &str { &self.service_type } /// Returns the node the proposed service can run on pub fn node_id(&self) -> &str { &self.node_id } /// Returns the list of key/value arguments for the proposed service pub fn arguments(&self) -> &[(String, String)] { &self.arguments } pub fn into_proto(self) -> admin::SplinterService { let mut proto = admin::SplinterService::new(); proto.set_service_id(self.service_id); proto.set_service_type(self.service_type); proto.set_allowed_nodes(protobuf::RepeatedField::from_vec(vec![self.node_id])); proto.set_arguments(protobuf::RepeatedField::from_vec( self.arguments .into_iter() .map(|(k, v)| { let mut argument = admin::SplinterService_Argument::new(); argument.set_key(k); argument.set_value(v); argument }) .collect(), )); proto } pub fn from_proto(mut proto: admin::SplinterService) -> Result<Self, InvalidStateError> { Ok(Self { service_id: proto.take_service_id(), service_type: proto.take_service_type(), node_id: proto .take_allowed_nodes() .get(0) .ok_or_else(|| { InvalidStateError::with_message( "unable to build, missing field: `node_id`".to_string(), ) })?
.to_string(), arguments: proto .take_arguments() .into_iter() .map(|mut argument| (argument.take_key(), argument.take_value())) .collect(), }) } } /// Builder for creating a `ProposedService` #[derive(Default, Clone)] pub struct ProposedServiceBuilder { service_id: Option<String>, service_type: Option<String>, node_id: Option<String>, arguments: Option<Vec<(String, String)>>, } impl ProposedServiceBuilder { /// Creates a new `ProposedServiceBuilder` pub fn new() -> Self { ProposedServiceBuilder::default() } /// Returns the service specific service ID pub fn service_id(&self) -> Option<String> { self.service_id.clone() } /// Returns the service type pub fn service_type(&self) -> Option<String> { self.service_type.clone() } /// Returns the node ID the service can connect to pub fn node_id(&self) -> Option<String> { self.node_id.clone() } /// Returns the list of arguments for the service pub fn arguments(&self) -> Option<Vec<(String, String)>> { self.arguments.clone() } /// Sets the service ID /// /// # Arguments /// /// * `service_id` - The unique service ID for service pub fn with_service_id(mut self, service_id: &str) -> ProposedServiceBuilder { self.service_id = Some(service_id.into()); self } /// Sets the service type /// /// # Arguments /// /// * `service_type` - The service type of the service pub fn with_service_type(mut self, service_type: &str) -> ProposedServiceBuilder { self.service_type = Some(service_type.into()); self } /// Sets the node ID the service is allowed to connect to /// /// # Arguments /// /// * `node_id` - A node ID of the node the service can connect to pub fn with_node_id(mut self, node_id: &str) -> ProposedServiceBuilder { self.node_id = Some(node_id.into()); self } /// Sets the service arguments /// /// # Arguments /// /// * `arguments` - A list of key-value pairs for the arguments for the service pub fn with_arguments(mut self, arguments: &[(String, String)]) -> ProposedServiceBuilder { self.arguments = Some(arguments.to_vec()); self } /// Builds the `ProposedService` /// /// Returns an error if the service ID, service_type, or allowed nodes is not set pub fn build(self) -> Result<ProposedService, InvalidStateError> { let service_id = match self.service_id { Some(service_id) if is_valid_service_id(&service_id) => service_id, Some(service_id) => { return Err(InvalidStateError::with_message(format!( "service_id is invalid ({}): must be a 4 character base62 string", service_id, ))) } None => { return Err(InvalidStateError::with_message( "unable to build, missing field: `service_id`".to_string(), )) } }; let service_type = self.service_type.ok_or_else(|| { InvalidStateError::with_message( "unable to build, missing field: `service_type`".to_string(), ) })?; let node_id = self.node_id.ok_or_else(|| { InvalidStateError::with_message("unable to build, missing field: `node_id`".to_string()) })?; let arguments = self.arguments.unwrap_or_default(); let service = ProposedService { service_id, service_type, node_id, arguments, }; Ok(service) } } impl TryFrom<&messages::SplinterService> for ProposedService { type Error = InvalidStateError; fn try_from( splinter_service: &messages::SplinterService, ) -> Result<ProposedService, Self::Error> { ProposedServiceBuilder::new() .with_service_id(&splinter_service.service_id) .with_service_type(&splinter_service.service_type) .with_node_id(splinter_service.allowed_nodes.get(0).ok_or_else(|| { InvalidStateError::with_message("Must contain 1 node ID".to_string()) })?) .with_arguments(&splinter_service.arguments) .build() } }
32.030702
100
0.596467
56315d1074dd2b60eb39f12a98375da3038dc98d
4,093
use core::mem; use iota_streams_core::Result; use super::{ unwrap::*, Context, }; use crate::{ command::Skip, io, types::{ ArrayLength, Bytes, Fallback, NBytes, Size, SkipFallback, Uint16, Uint32, Uint64, Uint8, }, }; struct SkipContext<F, IS> { ctx: Context<F, IS>, } impl<F, IS> AsMut<SkipContext<F, IS>> for Context<F, IS> { fn as_mut<'a>(&'a mut self) -> &'a mut SkipContext<F, IS> { unsafe { mem::transmute::<&'a mut Context<F, IS>, &'a mut SkipContext<F, IS>>(self) } } } impl<F, IS> AsMut<Context<F, IS>> for SkipContext<F, IS> { fn as_mut<'a>(&'a mut self) -> &'a mut Context<F, IS> { unsafe { mem::transmute::<&'a mut SkipContext<F, IS>, &'a mut Context<F, IS>>(self) } } } impl<F, IS: io::IStream> Unwrap for SkipContext<F, IS> { fn unwrap_u8(&mut self, u: &mut u8) -> Result<&mut Self> { let slice = self.ctx.stream.try_advance(1)?; *u = slice[0]; Ok(self) } fn unwrapn(&mut self, bytes: &mut [u8]) -> Result<&mut Self> { let slice = self.ctx.stream.try_advance(bytes.len())?; bytes.copy_from_slice(slice); Ok(self) } } fn unwrap_skip_u8<'a, F, IS: io::IStream>( ctx: &'a mut SkipContext<F, IS>, u: &mut Uint8, ) -> Result<&'a mut SkipContext<F, IS>> { ctx.unwrap_u8(&mut u.0) } fn unwrap_skip_u16<'a, F, IS: io::IStream>( ctx: &'a mut SkipContext<F, IS>, u: &mut Uint16, ) -> Result<&'a mut SkipContext<F, IS>> { ctx.unwrap_u16(&mut u.0) } fn unwrap_skip_u32<'a, F, IS: io::IStream>( ctx: &'a mut SkipContext<F, IS>, u: &mut Uint32, ) -> Result<&'a mut SkipContext<F, IS>> { ctx.unwrap_u32(&mut u.0) } fn unwrap_skip_u64<'a, F, IS: io::IStream>( ctx: &'a mut SkipContext<F, IS>, u: &mut Uint64, ) -> Result<&'a mut SkipContext<F, IS>> { ctx.unwrap_u64(&mut u.0) } fn unwrap_skip_size<'a, F, IS: io::IStream>( ctx: &'a mut SkipContext<F, IS>, size: &mut Size, ) -> Result<&'a mut SkipContext<F, IS>> { ctx.unwrap_size(size) } fn unwrap_skip_bytes<'a, F, IS: io::IStream>( ctx: &'a mut SkipContext<F, IS>, bytes: &mut [u8], ) -> Result<&'a mut SkipContext<F, IS>> { ctx.unwrapn(bytes) } impl<'a, F, IS: io::IStream> Skip<&'a mut Uint8> for Context<F, IS> { fn skip(&mut self, u: &'a mut Uint8) -> Result<&mut Self> { Ok(unwrap_skip_u8(self.as_mut(), u)?.as_mut()) } } impl<'a, F, IS: io::IStream> Skip<&'a mut Uint16> for Context<F, IS> { fn skip(&mut self, u: &'a mut Uint16) -> Result<&mut Self> { Ok(unwrap_skip_u16(self.as_mut(), u)?.as_mut()) } } impl<'a, F, IS: io::IStream> Skip<&'a mut Uint32> for Context<F, IS> { fn skip(&mut self, u: &'a mut Uint32) -> Result<&mut Self> { Ok(unwrap_skip_u32(self.as_mut(), u)?.as_mut()) } } impl<'a, F, IS: io::IStream> Skip<&'a mut Uint64> for Context<F, IS> { fn skip(&mut self, u: &'a mut Uint64) -> Result<&mut Self> { Ok(unwrap_skip_u64(self.as_mut(), u)?.as_mut()) } } impl<'a, F, IS: io::IStream> Skip<&'a mut Size> for Context<F, IS> { fn skip(&mut self, size: &'a mut Size) -> Result<&mut Self> { Ok(unwrap_skip_size(self.as_mut(), size)?.as_mut()) } } impl<'a, F, N: ArrayLength<u8>, IS: io::IStream> Skip<&'a mut NBytes<N>> for Context<F, IS> { fn skip(&mut self, nbytes: &'a mut NBytes<N>) -> Result<&mut Self> { Ok(unwrap_skip_bytes(self.as_mut(), nbytes.as_mut_slice())?.as_mut()) } } impl<'a, F, IS: io::IStream> Skip<&'a mut Bytes> for Context<F, IS> { fn skip(&mut self, bytes: &'a mut Bytes) -> Result<&mut Self> { let mut size = Size(0); self.skip(&mut size)?; (bytes.0).resize(size.0, 0); Ok(unwrap_skip_bytes(self.as_mut(), &mut (bytes.0)[..])?.as_mut()) } } impl<'a, F, T: 'a + SkipFallback<F>, IS: io::IStream> Skip<&'a mut Fallback<T>> for Context<F, IS> { fn skip(&mut self, 
val: &'a mut Fallback<T>) -> Result<&mut Self> { (val.0).unwrap_skip(self)?; Ok(self) } }
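The `AsMut` pair at the top of this file reinterprets a `Context` as a `SkipContext` via `transmute`, relying on the newtype having identical layout. The same wrapper-context idea can be expressed safely with a borrowing view; a minimal standalone sketch (hypothetical types, our illustration of the design choice rather than the crate's code):

```
// A context plus a "view" type that exposes a different method set over
// the same state -- the safe analogue of the transmute-based AsMut pair.
struct Context {
    pos: usize,
}

struct SkipView<'a> {
    ctx: &'a mut Context,
}

impl Context {
    fn as_skip(&mut self) -> SkipView<'_> {
        SkipView { ctx: self }
    }
}

impl<'a> SkipView<'a> {
    fn skip(&mut self, n: usize) -> &mut Self {
        self.ctx.pos += n;
        self
    }
}

fn main() {
    let mut ctx = Context { pos: 0 };
    ctx.as_skip().skip(3).skip(2);
    assert_eq!(ctx.pos, 5);
}
```

The transmute version avoids the extra indirection and lifetime parameter, at the cost of `unsafe` and a layout invariant the compiler does not check (marking the wrapper `#[repr(transparent)]` would make that invariant explicit).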
29.235714
100
0.566577
23b4de4a343f5102204a7467c94cc8ddc1c2a9bb
4,920
use menoh_sys; use std::ffi; use std::mem; use std::ptr; use std::slice; use Dtype; use handler::Handler; use Error; use error::check; /// Model, which executes computation. pub struct Model { handle: menoh_sys::menoh_model_handle, } impl Model { /// Fetch the shape of a variable. /// /// ``` /// # use menoh::*; /// # fn main() -> Result<(), Error> { /// # let model = Builder::from_onnx("MLP.onnx")? /// # .add_input::<f32>("input", &[2, 3])? /// # .add_output::<f32>("fc2")? /// # .build("mkldnn", "")?; /// let dims = model.get_variable_dims("fc2")?; /// # assert_eq!(dims, &[2, 5]); /// # Ok(()) /// # } /// ``` pub fn get_variable_dims(&self, name: &str) -> Result<Vec<usize>, Error> { let name = ffi::CString::new(name)?; unsafe { let mut size = 0; check(menoh_sys::menoh_model_get_variable_dims_size(self.handle, name.as_ptr(), &mut size))?; let mut dims = Vec::with_capacity(size as _); for index in 0..size { let mut dim = 0; check(menoh_sys::menoh_model_get_variable_dims_at(self.handle, name.as_ptr(), index, &mut dim))?; dims.push(dim as _); } Ok(dims) } } fn get_variable_dtype(&self, name: &str) -> Result<menoh_sys::menoh_dtype, Error> { let name = ffi::CString::new(name)?; unsafe { let mut dtype = mem::uninitialized(); check(menoh_sys::menoh_model_get_variable_dtype(self.handle, name.as_ptr(), &mut dtype))?; Ok(dtype) } } /// Fetch the shape and read-only view of a variable. /// /// ``` /// # use menoh::*; /// # fn main() -> Result<(), Error> { /// # let model = Builder::from_onnx("MLP.onnx")? /// # .add_input::<f32>("input", &[2, 3])? /// # .add_output::<f32>("fc2")? /// # .build("mkldnn", "")?; /// let (dims, buf) = model.get_variable::<f32>("fc2")?; /// # assert_eq!(dims, &[2, 5]); /// # Ok(()) /// # } /// ``` pub fn get_variable<T>(&self, name: &str) -> Result<(Vec<usize>, &[T]), Error> where T: Dtype { T::check(self.get_variable_dtype(name)?)?; let dims = self.get_variable_dims(name)?; let name = ffi::CString::new(name)?; let mut buffer = ptr::null_mut(); unsafe { check(menoh_sys::menoh_model_get_variable_buffer_handle(self.handle, name.as_ptr(), &mut buffer))?; let buffer = slice::from_raw_parts(buffer as _, dims.iter().product()); Ok((dims, buffer)) } } /// Fetch the shape and read/write view of a variable. /// /// ``` /// # use menoh::*; /// # fn main() -> Result<(), Error> { /// # let mut model = Builder::from_onnx("MLP.onnx")? /// # .add_input::<f32>("input", &[2, 3])? /// # .add_output::<f32>("fc2")? /// # .build("mkldnn", "")?; /// let (dims, buf) = model.get_variable_mut::<f32>("fc2")?; /// # assert_eq!(dims, &[2, 5]); /// # Ok(()) /// # } /// ``` pub fn get_variable_mut<T>(&mut self, name: &str) -> Result<(Vec<usize>, &mut [T]), Error> where T: Dtype { T::check(self.get_variable_dtype(name)?)?; let dims = self.get_variable_dims(name)?; let name = ffi::CString::new(name)?; let mut buffer = ptr::null_mut(); unsafe { check(menoh_sys::menoh_model_get_variable_buffer_handle(self.handle, name.as_ptr(), &mut buffer))?; let buffer = slice::from_raw_parts_mut(buffer as _, dims.iter().product()); Ok((dims, buffer)) } } pub fn run(&mut self) -> Result<(), Error> { unsafe { check(menoh_sys::menoh_model_run(self.handle)) } } } impl Handler for Model { type Handle = menoh_sys::menoh_model_handle; unsafe fn from_handle(handle: Self::Handle) -> Self { Self { handle } } unsafe fn handle(&self) -> Self::Handle { self.handle } } impl Drop for Model { fn drop(&mut self) { unsafe { menoh_sys::menoh_delete_model(self.handle) } } }
33.931034
94
0.44939
ef53d924f548623f5f2f676261abe8a272e9ecd3
11,027
#![allow( // need to test cloning, these are deliberate. clippy::redundant_clone, // yep, we create owned instance just for comparison, to test comparison // with owned instacnces. clippy::cmp_owned, )] use arcstr::ArcStr; #[test] fn test_various_partial_eq() { macro_rules! check_partial_eq { (@eq1; $a:expr, $b:expr) => {{ // Note: intentionally not assert_eq. assert!($a == $b); assert!(!($a != $b)); assert!($b == $a); assert!(!($b != $a)); }}; (@ne1; $a:expr, $b:expr) => { assert!($a != $b); assert!(!($a == $b)); assert!($b != $a); assert!(!($b == $a)); }; (@eq; $a:expr, $b:expr) => {{ check_partial_eq!(@eq1; $a, $b); check_partial_eq!(@eq1; $a.clone(), $b); check_partial_eq!(@eq1; $a.clone(), $a); }}; (@ne; $a:expr, $b:expr) => {{ check_partial_eq!(@ne1; $a, $b); check_partial_eq!(@ne1; $a.clone(), $b); }}; } check_partial_eq!(@eq; ArcStr::from("123"), "123"); check_partial_eq!(@eq; ArcStr::from("foobar"), *"foobar"); check_partial_eq!(@eq; ArcStr::from("🏳️‍🌈"), String::from("🏳️‍🌈")); check_partial_eq!(@eq; ArcStr::from("🏳️‍⚧️"), std::borrow::Cow::Borrowed("🏳️‍⚧️")); check_partial_eq!(@eq; ArcStr::from("🏴‍☠️"), std::borrow::Cow::Owned("🏴‍☠️".into())); check_partial_eq!(@eq; ArcStr::from(":o"), std::rc::Rc::<str>::from(":o")); check_partial_eq!(@eq; ArcStr::from("!!!"), std::sync::Arc::<str>::from("!!!")); check_partial_eq!(@eq; ArcStr::from(""), ""); check_partial_eq!(@eq; ArcStr::from(""), ArcStr::from("")); check_partial_eq!(@ne; ArcStr::from("123"), "124"); check_partial_eq!(@ne; ArcStr::from("Foobar"), *"FoobarFoobar"); check_partial_eq!(@ne; ArcStr::from("①"), String::from("1")); check_partial_eq!(@ne; ArcStr::from(""), String::from("1")); check_partial_eq!(@ne; ArcStr::from("abc"), String::from("")); check_partial_eq!(@ne; ArcStr::from("butts"), std::borrow::Cow::Borrowed("boots")); check_partial_eq!(@ne; ArcStr::from("bots"), std::borrow::Cow::Owned("🤖".into())); check_partial_eq!(@ne; ArcStr::from("put"), std::rc::Rc::<str>::from("⛳️")); check_partial_eq!(@ne; ArcStr::from("pots"), std::sync::Arc::<str>::from("🍲")); } #[test] fn test_indexing() { let a = ArcStr::from("12345"); assert_eq!(&a[..], "12345"); assert_eq!(&a[1..], "2345"); assert_eq!(&a[..4], "1234"); assert_eq!(&a[1..4], "234"); assert_eq!(&a[1..=3], "234"); assert_eq!(&a[..=3], "1234"); } #[test] fn test_fmt() { assert_eq!(format!("{}", ArcStr::from("test")), "test"); assert_eq!(format!("{:?}", ArcStr::from("test")), "\"test\""); // make sure we forward formatting to the real impl... 
let s = ArcStr::from("uwu"); assert_eq!(format!("{:.<6}", s), "uwu..."); assert_eq!(format!("{:.>6}", s), "...uwu"); assert_eq!(format!("{:.^9}", s), r#"...uwu..."#); } #[test] fn test_ord() { let mut arr = [ArcStr::from("foo"), "bar".into(), "baz".into()]; arr.sort(); assert_eq!(&arr, &["bar", "baz", "foo"]); } #[test] fn smoke_test_clone() { let count = if cfg!(miri) { 20 } else { 100 }; for _ in 0..count { drop(vec![ArcStr::from("foobar"); count]); drop(vec![ArcStr::from("baz quux"); count]); let lit = { arcstr::literal!("test 999") }; drop(vec![lit; count]); } drop(vec![ArcStr::default(); count]); } #[test] fn test_btreemap() { let mut m = std::collections::BTreeMap::new(); for i in 0..100 { let prev = m.insert(ArcStr::from(format!("key {}", i)), i); assert_eq!(prev, None); } for i in 0..100 { let s = format!("key {}", i); assert_eq!(m.remove(s.as_str()), Some(i)); } } #[test] fn test_hashmap() { let mut m = std::collections::HashMap::new(); for i in 0..100 { let prev = m.insert(ArcStr::from(format!("key {}", i)), i); assert_eq!(prev, None); } for i in 0..100 { let key = format!("key {}", i); let search = key.as_str(); assert_eq!(m[search], i); assert_eq!(m.remove(search), Some(i)); } } #[cfg(feature = "serde")] #[test] fn test_serde() { use serde_test::{assert_de_tokens, assert_tokens, Token}; let teststr = ArcStr::from("test test 123 456"); assert_tokens(&teststr, &[Token::BorrowedStr("test test 123 456")]); assert_tokens(&teststr.clone(), &[Token::BorrowedStr("test test 123 456")]); assert_tokens(&ArcStr::default(), &[Token::BorrowedStr("")]); let checks = &[ [Token::Str("123")], [Token::BorrowedStr("123")], [Token::String("123")], [Token::Bytes(b"123")], [Token::BorrowedBytes(b"123")], [Token::ByteBuf(b"123")], ]; for check in checks { eprintln!("checking {:?}", check); assert_de_tokens(&ArcStr::from("123"), check); } } #[test] fn test_loose_ends() { assert_eq!(ArcStr::default(), ""); assert_eq!("abc".parse::<ArcStr>().unwrap(), "abc"); let abc_arc = ArcStr::from("abc"); let abc_str: &str = abc_arc.as_ref(); let abc_bytes: &[u8] = abc_arc.as_ref(); assert_eq!(abc_str, "abc"); assert_eq!(abc_bytes, b"abc"); } #[test] fn test_from_into_raw() { let a = vec![ ArcStr::default(), ArcStr::from("1234"), ArcStr::from(format!("test {}", 1)), ]; let v = a.into_iter().cycle().take(100).collect::<Vec<ArcStr>>(); let v2 = v .iter() .map(|s| ArcStr::into_raw(s.clone())) .collect::<Vec<_>>(); drop(v); let back = v2 .iter() .map(|s| unsafe { ArcStr::from_raw(*s) }) .collect::<Vec<_>>(); let end = [ ArcStr::default(), ArcStr::from("1234"), ArcStr::from(format!("test {}", 1)), ] .iter() .cloned() .cycle() .take(100) .collect::<Vec<_>>(); assert_eq!(back, end); drop(back); } #[test] fn test_strong_count() { let foobar = ArcStr::from("foobar"); assert_eq!(Some(1), ArcStr::strong_count(&foobar)); let also_foobar = ArcStr::clone(&foobar); assert_eq!(Some(2), ArcStr::strong_count(&foobar)); assert_eq!(Some(2), ArcStr::strong_count(&also_foobar)); let astr = arcstr::literal!("baz"); assert_eq!(None, ArcStr::strong_count(&astr)); assert_eq!(None, ArcStr::strong_count(&ArcStr::default())); } #[test] fn test_ptr_eq() { let foobar = ArcStr::from("foobar"); let same_foobar = foobar.clone(); let other_foobar = ArcStr::from("foobar"); assert!(ArcStr::ptr_eq(&foobar, &same_foobar)); assert!(!ArcStr::ptr_eq(&foobar, &other_foobar)); const YET_AGAIN_A_DIFFERENT_FOOBAR: ArcStr = arcstr::literal!("foobar"); let strange_new_foobar = YET_AGAIN_A_DIFFERENT_FOOBAR.clone(); let wild_blue_foobar = 
strange_new_foobar.clone(); assert!(ArcStr::ptr_eq(&strange_new_foobar, &wild_blue_foobar)); } #[test] fn test_statics() { const STATIC: ArcStr = arcstr::literal!("Electricity!"); assert!(ArcStr::is_static(&STATIC)); assert_eq!(ArcStr::as_static(&STATIC), Some("Electricity!")); assert!(ArcStr::is_static(&ArcStr::new())); assert_eq!(ArcStr::as_static(&ArcStr::new()), Some("")); let st = { // Note that they don't have to be consts, just made using `literal!`: let still_static = { arcstr::literal!("Shocking!") }; assert!(ArcStr::is_static(&still_static)); assert_eq!(ArcStr::as_static(&still_static), Some("Shocking!")); assert_eq!(ArcStr::as_static(&still_static.clone()), Some("Shocking!")); // clones are still static assert_eq!( ArcStr::as_static(&still_static.clone().clone()), Some("Shocking!") ); ArcStr::as_static(&still_static).unwrap() }; assert_eq!(st, "Shocking!"); // But it won't work for other strings. let nonstatic = ArcStr::from("Grounded..."); assert_eq!(ArcStr::as_static(&nonstatic), None); } #[test] fn test_static_arcstr_include_bytes() { const APACHE: ArcStr = arcstr::literal!(include_str!("../LICENSE-APACHE")); assert!(APACHE.len() > 10000); assert!(APACHE.trim_start().starts_with("Apache License")); assert!(APACHE .trim_end() .ends_with("limitations under the License.")); } #[test] fn test_inherent_overrides() { let s = ArcStr::from("abc"); assert_eq!(s.as_str(), "abc"); let a = ArcStr::from("foo"); assert_eq!(a.len(), 3); assert!(!ArcStr::from("foo").is_empty()); assert!(ArcStr::new().is_empty()); } #[test] fn test_froms_more() { let mut s = "asdf".to_string(); { let s2: &mut str = &mut s; // Make sure we go through the right From let arc = <ArcStr as From<&mut str>>::from(s2); assert_eq!(arc, "asdf"); } let arc = <ArcStr as From<&String>>::from(&s); assert_eq!(arc, "asdf"); // This is a slightly more natural way to check, as it's when the "you a // weird From" situation comes up more often. 
let b: Option<Box<str>> = Some("abc".into()); assert_eq!(b.map(ArcStr::from), Some(ArcStr::from("abc"))); let b: Option<std::rc::Rc<str>> = Some("abc".into()); assert_eq!(b.map(ArcStr::from), Some(ArcStr::from("abc"))); let b: Option<std::sync::Arc<str>> = Some("abc".into()); assert_eq!(b.map(ArcStr::from), Some(ArcStr::from("abc"))); let bs: Box<str> = ArcStr::from("123").into(); assert_eq!(&bs[..], "123"); let rcs: std::rc::Rc<str> = ArcStr::from("123").into(); assert_eq!(&rcs[..], "123"); let arcs: std::sync::Arc<str> = ArcStr::from("123").into(); assert_eq!(&arcs[..], "123"); use std::borrow::Cow::{self, Borrowed, Owned}; let cow: Cow<'_, str> = Borrowed("abcd"); assert_eq!(ArcStr::from(cow), "abcd"); let cow: Cow<'_, str> = Owned("abcd".into()); assert_eq!(ArcStr::from(cow), "abcd"); let cow: Option<Cow<'_, str>> = Some(&arc).map(Cow::from); assert_eq!(cow.as_deref(), Some("asdf")); let cow: Option<Cow<'_, str>> = Some(arc).map(Cow::from); assert!(matches!(cow, Some(Cow::Owned(_)))); assert_eq!(cow.as_deref(), Some("asdf")); let st = { arcstr::literal!("static should borrow") }; { let cow: Option<Cow<'_, str>> = Some(st.clone()).map(Cow::from); assert!(matches!(cow, Some(Cow::Borrowed(_)))); assert_eq!(cow.as_deref(), Some("static should borrow")); } // works with any lifetime { let cow: Option<Cow<'static, str>> = Some(st.clone()).map(Cow::from); assert!(matches!(cow, Some(Cow::Borrowed(_)))); assert_eq!(cow.as_deref(), Some("static should borrow")); } let astr = ArcStr::from(&st); assert!(ArcStr::ptr_eq(&st, &astr)); // Check non-statics let astr2 = ArcStr::from("foobar"); assert!(ArcStr::ptr_eq(&astr2, &ArcStr::from(&astr2))) }
32.818452
89
0.559173
db0ea3cb895eb3058b67d9ad299e7538e8b010dc
1,603
use chrono::prelude::*; use db::DB; use serde::{Deserialize, Serialize}; use std::convert::Infallible; use warp::{Filter, Rejection}; type Result<T> = std::result::Result<T, error::Error>; type WebResult<T> = std::result::Result<T, Rejection>; mod db; mod error; mod handler; #[derive(Serialize, Deserialize, Debug)] pub struct Book { pub id: String, pub name: String, pub author: String, pub num_pages: usize, pub added_at: DateTime<Utc>, pub tags: Vec<String>, } #[tokio::main] async fn main() -> Result<()> { let db = DB::init().await?; let book = warp::path("book"); let book_routes = book .and(warp::post()) .and(warp::body::json()) .and(with_db(db.clone())) .and_then(handler::create_book_handler) .or(book .and(warp::put()) .and(warp::path::param()) .and(warp::body::json()) .and(with_db(db.clone())) .and_then(handler::edit_book_handler)) .or(book .and(warp::delete()) .and(warp::path::param()) .and(with_db(db.clone())) .and_then(handler::delete_book_handler)) .or(book .and(warp::get()) .and(with_db(db.clone())) .and_then(handler::books_list_handler)); let routes = book_routes.recover(error::handle_rejection); println!("Started on port 8080"); warp::serve(routes).run(([0, 0, 0, 0], 8080)).await; Ok(()) } fn with_db(db: DB) -> impl Filter<Extract = (DB,), Error = Infallible> + Clone { warp::any().map(move || db.clone()) }
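The `with_db` filter above is the usual warp idiom for injecting shared state; each handler then receives the extracted values in chain order. A hedged sketch of a handler signature matching the `create` route (the real `handler::create_book_handler` lives in the `handler` module and is not shown in this file):

```
// Hypothetical handler body: the chain `warp::post() -> body::json() ->
// with_db(..)` extracts (Book, DB), in that order.
async fn create_book_handler(book: Book, db: DB) -> WebResult<impl warp::Reply> {
    // ... persist `book` via `db` here ...
    Ok(warp::reply::json(&book))
}
```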
26.278689
80
0.570805
26cc0b2c6a23cd772889819cfad701cd48c523e3
38,390
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Raw concurrency primitives you know and love. //! //! These primitives are not recommended for general use, but are provided for //! flavorful use-cases. It is recommended to use the types at the top of the //! `sync` crate which wrap values directly and provide safer abstractions for //! containing data. use core::prelude::*; use core::atomics; use core::finally::Finally; use core::kinds::marker; use core::mem; use core::ty::Unsafe; use collections::Vec; use mutex; use comm::{Receiver, Sender, channel}; /**************************************************************************** * Internals ****************************************************************************/ // Each waiting task receives on one of these. type WaitEnd = Receiver<()>; type SignalEnd = Sender<()>; // A doubly-ended queue of waiting tasks. struct WaitQueue { head: Receiver<SignalEnd>, tail: Sender<SignalEnd>, } impl WaitQueue { fn new() -> WaitQueue { let (block_tail, block_head) = channel(); WaitQueue { head: block_head, tail: block_tail } } // Signals one live task from the queue. fn signal(&self) -> bool { match self.head.try_recv() { Ok(ch) => { // Send a wakeup signal. If the waiter was killed, its port will // have closed. Keep trying until we get a live task. if ch.send_opt(()).is_ok() { true } else { self.signal() } } _ => false } } fn broadcast(&self) -> uint { let mut count = 0; loop { match self.head.try_recv() { Ok(ch) => { if ch.send_opt(()).is_ok() { count += 1; } } _ => break } } count } fn wait_end(&self) -> WaitEnd { let (signal_end, wait_end) = channel(); self.tail.send(signal_end); wait_end } } // The building-block used to make semaphores, mutexes, and rwlocks. struct Sem<Q> { lock: mutex::Mutex, // n.b, we need Sem to be `Share`, but the WaitQueue type is not send/share // (for good reason). We have an internal invariant on this semaphore, // however, that the queue is never accessed outside of a locked // context. inner: Unsafe<SemInner<Q>> } struct SemInner<Q> { count: int, waiters: WaitQueue, // Can be either unit or another waitqueue. Some sems shouldn't come with // a condition variable attached, others should. blocked: Q, } #[must_use] struct SemGuard<'a, Q> { sem: &'a Sem<Q>, } impl<Q: Send> Sem<Q> { fn new(count: int, q: Q) -> Sem<Q> { Sem { lock: mutex::Mutex::new(), inner: Unsafe::new(SemInner { waiters: WaitQueue::new(), count: count, blocked: q, }) } } unsafe fn with(&self, f: |&mut SemInner<Q>|) { let _g = self.lock.lock(); // This &mut is safe because, due to the lock, we are the only one who can touch the data f(&mut *self.inner.get()) } pub fn acquire(&self) { unsafe { let mut waiter_nobe = None; self.with(|state| { state.count -= 1; if state.count < 0 { // Create waiter nobe, enqueue ourself, and tell // outer scope we need to block. waiter_nobe = Some(state.waiters.wait_end()); } }); // Uncomment if you wish to test for sem races. Not // valgrind-friendly. /* for _ in range(0u, 1000) { task::deschedule(); } */ // Need to wait outside the exclusive. 
if waiter_nobe.is_some() { let _ = waiter_nobe.unwrap().recv(); } } } pub fn release(&self) { unsafe { self.with(|state| { state.count += 1; if state.count <= 0 { state.waiters.signal(); } }) } } pub fn access<'a>(&'a self) -> SemGuard<'a, Q> { self.acquire(); SemGuard { sem: self } } } #[unsafe_destructor] impl<'a, Q: Send> Drop for SemGuard<'a, Q> { fn drop(&mut self) { self.sem.release(); } } impl Sem<Vec<WaitQueue>> { fn new_and_signal(count: int, num_condvars: uint) -> Sem<Vec<WaitQueue>> { let mut queues = Vec::new(); for _ in range(0, num_condvars) { queues.push(WaitQueue::new()); } Sem::new(count, queues) } // The only other places that condvars get built are rwlock.write_cond() // and rwlock_write_mode. pub fn access_cond<'a>(&'a self) -> SemCondGuard<'a> { SemCondGuard { guard: self.access(), cvar: Condvar { sem: self, order: Nothing, nocopy: marker::NoCopy }, } } } // FIXME(#3598): Want to use an Option down below, but we need a custom enum // that's not polymorphic to get around the fact that lifetimes are invariant // inside of type parameters. enum ReacquireOrderLock<'a> { Nothing, // c.c Just(&'a Semaphore), } /// A mechanism for atomic-unlock-and-deschedule blocking and signalling. pub struct Condvar<'a> { // The 'Sem' object associated with this condvar. This is the one that's // atomically-unlocked-and-descheduled upon and reacquired during wakeup. sem: &'a Sem<Vec<WaitQueue> >, // This is (can be) an extra semaphore which is held around the reacquire // operation on the first one. This is only used in cvars associated with // rwlocks, and is needed to ensure that, when a downgrader is trying to // hand off the access lock (which would be the first field, here), a 2nd // writer waking up from a cvar wait can't race with a reader to steal it, // See the comment in write_cond for more detail. order: ReacquireOrderLock<'a>, // Make sure condvars are non-copyable. nocopy: marker::NoCopy, } impl<'a> Condvar<'a> { /// Atomically drop the associated lock, and block until a signal is sent. /// /// # Failure /// /// A task which is killed while waiting on a condition variable will wake /// up, fail, and unlock the associated lock as it unwinds. pub fn wait(&self) { self.wait_on(0) } /// As wait(), but can specify which of multiple condition variables to /// wait on. Only a signal_on() or broadcast_on() with the same condvar_id /// will wake this thread. /// /// The associated lock must have been initialised with an appropriate /// number of condvars. The condvar_id must be between 0 and num_condvars-1 /// or else this call will fail. /// /// wait() is equivalent to wait_on(0). pub fn wait_on(&self, condvar_id: uint) { let mut wait_end = None; let mut out_of_bounds = None; // Release lock, 'atomically' enqueuing ourselves in so doing. unsafe { self.sem.with(|state| { if condvar_id < state.blocked.len() { // Drop the lock. state.count += 1; if state.count <= 0 { state.waiters.signal(); } // Create waiter nobe, and enqueue ourself to // be woken up by a signaller. wait_end = Some(state.blocked.get(condvar_id).wait_end()); } else { out_of_bounds = Some(state.blocked.len()); } }) } // If deschedule checks start getting inserted anywhere, we can be // killed before or after enqueueing. check_cvar_bounds(out_of_bounds, condvar_id, "cond.wait_on()", || { // Unconditionally "block". (Might not actually block if a // signaller already sent -- I mean 'unconditionally' in contrast // with acquire().) (|| { let _ = wait_end.take_unwrap().recv(); }).finally(|| { // Reacquire the condvar. 
match self.order { Just(lock) => { let _g = lock.access(); self.sem.acquire(); } Nothing => self.sem.acquire(), } }) }) } /// Wake up a blocked task. Returns false if there was no blocked task. pub fn signal(&self) -> bool { self.signal_on(0) } /// As signal, but with a specified condvar_id. See wait_on. pub fn signal_on(&self, condvar_id: uint) -> bool { unsafe { let mut out_of_bounds = None; let mut result = false; self.sem.with(|state| { if condvar_id < state.blocked.len() { result = state.blocked.get(condvar_id).signal(); } else { out_of_bounds = Some(state.blocked.len()); } }); check_cvar_bounds(out_of_bounds, condvar_id, "cond.signal_on()", || result) } } /// Wake up all blocked tasks. Returns the number of tasks woken. pub fn broadcast(&self) -> uint { self.broadcast_on(0) } /// As broadcast, but with a specified condvar_id. See wait_on. pub fn broadcast_on(&self, condvar_id: uint) -> uint { let mut out_of_bounds = None; let mut queue = None; unsafe { self.sem.with(|state| { if condvar_id < state.blocked.len() { // To avoid :broadcast_heavy, we make a new waitqueue, // swap it out with the old one, and broadcast on the // old one outside of the little-lock. queue = Some(mem::replace(state.blocked.get_mut(condvar_id), WaitQueue::new())); } else { out_of_bounds = Some(state.blocked.len()); } }); check_cvar_bounds(out_of_bounds, condvar_id, "cond.broadcast_on()", || { queue.take_unwrap().broadcast() }) } } } // Checks whether a condvar ID was out of bounds, and fails if so, or does // something else next on success. #[inline] fn check_cvar_bounds<U>( out_of_bounds: Option<uint>, id: uint, act: &str, blk: || -> U) -> U { match out_of_bounds { Some(0) => fail!("{} with illegal ID {} - this lock has no condvars!", act, id), Some(length) => fail!("{} with illegal ID {} - ID must be less than {}", act, id, length), None => blk() } } #[must_use] struct SemCondGuard<'a> { guard: SemGuard<'a, Vec<WaitQueue>>, cvar: Condvar<'a>, } /**************************************************************************** * Semaphores ****************************************************************************/ /// A counting, blocking, bounded-waiting semaphore. pub struct Semaphore { sem: Sem<()>, } /// An RAII guard used to represent an acquired resource to a semaphore. When /// dropped, this value will release the resource back to the semaphore. #[must_use] pub struct SemaphoreGuard<'a> { _guard: SemGuard<'a, ()>, } impl Semaphore { /// Create a new semaphore with the specified count. pub fn new(count: int) -> Semaphore { Semaphore { sem: Sem::new(count, ()) } } /// Acquire a resource represented by the semaphore. Blocks if necessary /// until resource(s) become available. pub fn acquire(&self) { self.sem.acquire() } /// Release a held resource represented by the semaphore. Wakes a blocked /// contending task, if any exist. Won't block the caller. pub fn release(&self) { self.sem.release() } /// Acquire a resource of this semaphore, returning an RAII guard which will /// release the resource when dropped. pub fn access<'a>(&'a self) -> SemaphoreGuard<'a> { SemaphoreGuard { _guard: self.sem.access() } } } /**************************************************************************** * Mutexes ****************************************************************************/ /// A blocking, bounded-waiting, mutual exclusion lock with an associated /// FIFO condition variable. /// /// # Failure /// A task which fails while holding a mutex will unlock the mutex as it /// unwinds.
pub struct Mutex { sem: Sem<Vec<WaitQueue>>, } /// An RAII structure which is used to gain access to a mutex's condition /// variable. Additionally, when a value of this type is dropped, the /// corresponding mutex is also unlocked. #[must_use] pub struct MutexGuard<'a> { _guard: SemGuard<'a, Vec<WaitQueue>>, /// Inner condition variable which is connected to the outer mutex, and can /// be used for atomic-unlock-and-deschedule. pub cond: Condvar<'a>, } impl Mutex { /// Create a new mutex, with one associated condvar. pub fn new() -> Mutex { Mutex::new_with_condvars(1) } /// Create a new mutex, with a specified number of associated condvars. This /// will allow calling wait_on/signal_on/broadcast_on with condvar IDs /// between 0 and num_condvars-1. (If num_condvars is 0, lock_cond will be /// allowed but any operations on the condvar will fail.) pub fn new_with_condvars(num_condvars: uint) -> Mutex { Mutex { sem: Sem::new_and_signal(1, num_condvars) } } /// Acquires ownership of this mutex, returning an RAII guard which will /// unlock the mutex when dropped. The associated condition variable can /// also be accessed through the returned guard. pub fn lock<'a>(&'a self) -> MutexGuard<'a> { let SemCondGuard { guard, cvar } = self.sem.access_cond(); MutexGuard { _guard: guard, cond: cvar } } } /**************************************************************************** * Reader-writer locks ****************************************************************************/ // NB: Wikipedia - Readers-writers_problem#The_third_readers-writers_problem /// A blocking, no-starvation, reader-writer lock with an associated condvar. /// /// # Failure /// /// A task which fails while holding an rwlock will unlock the rwlock as it /// unwinds. pub struct RWLock { order_lock: Semaphore, access_lock: Sem<Vec<WaitQueue>>, // The only way the count flag is ever accessed is with xadd. Since it is // a read-modify-write operation, multiple xadds on different cores will // always be consistent with respect to each other, so a monotonic/relaxed // consistency ordering suffices (i.e., no extra barriers are needed). // // FIXME(#6598): The atomics module has no relaxed ordering flag, so I use // acquire/release orderings superfluously. Change these someday. read_count: atomics::AtomicUint, } /// An RAII helper which is created by acquiring a read lock on an RWLock. When /// dropped, this will unlock the RWLock. #[must_use] pub struct RWLockReadGuard<'a> { lock: &'a RWLock, } /// An RAII helper which is created by acquiring a write lock on an RWLock. When /// dropped, this will unlock the RWLock. /// /// A value of this type can also be consumed to downgrade to a read-only lock. #[must_use] pub struct RWLockWriteGuard<'a> { lock: &'a RWLock, /// Inner condition variable that is connected to the write-mode of the /// outer rwlock. pub cond: Condvar<'a>, } impl RWLock { /// Create a new rwlock, with one associated condvar. pub fn new() -> RWLock { RWLock::new_with_condvars(1) } /// Create a new rwlock, with a specified number of associated condvars. /// Similar to mutex_with_condvars. pub fn new_with_condvars(num_condvars: uint) -> RWLock { RWLock { order_lock: Semaphore::new(1), access_lock: Sem::new_and_signal(1, num_condvars), read_count: atomics::AtomicUint::new(0), } } /// Acquires a read-lock, returning an RAII guard that will unlock the lock /// when dropped. Calls to 'read' from other tasks may run concurrently with /// this one. 
pub fn read<'a>(&'a self) -> RWLockReadGuard<'a> { let _guard = self.order_lock.access(); let old_count = self.read_count.fetch_add(1, atomics::Acquire); if old_count == 0 { self.access_lock.acquire(); } RWLockReadGuard { lock: self } } /// Acquire a write-lock, returning an RAII guard that will unlock the lock /// when dropped. No calls to 'read' or 'write' from other tasks will run /// concurrently with this one. /// /// You can also downgrade a write to a read by calling the `downgrade` /// method on the returned guard. Additionally, the guard will contain a /// `Condvar` attached to this lock. /// /// # Example /// /// ```rust /// use sync::raw::RWLock; /// /// let lock = RWLock::new(); /// let write = lock.write(); /// // ... exclusive access ... /// let read = write.downgrade(); /// // ... shared access ... /// drop(read); /// ``` pub fn write<'a>(&'a self) -> RWLockWriteGuard<'a> { let _g = self.order_lock.access(); self.access_lock.acquire(); // It's important to thread our order lock into the condvar, so that // when a cond.wait() wakes up, it uses it while reacquiring the // access lock. If we permitted a waking-up writer to "cut in line", // there could arise a subtle race when a downgrader attempts to hand // off the reader cloud lock to a waiting reader. This race is tested // in arc.rs (test_rw_write_cond_downgrade_read_race) and looks like: // T1 (writer) T2 (downgrader) T3 (reader) // [in cond.wait()] // [locks for writing] // [holds access_lock] // [is signalled, perhaps by // downgrader or a 4th thread] // tries to lock access(!) // lock order_lock // xadd read_count[0->1] // tries to lock access // [downgrade] // xadd read_count[1->2] // unlock access // Since T1 contended on the access lock before T3 did, it will steal // the lock handoff. Adding order_lock in the condvar reacquire path // solves this because T1 will hold order_lock while waiting on access, // which will cause T3 to have to wait until T1 finishes its write, // which can't happen until T2 finishes the downgrade-read entirely. // The astute reader will also note that making waking writers use the // order_lock is better for not starving readers. RWLockWriteGuard { lock: self, cond: Condvar { sem: &self.access_lock, order: Just(&self.order_lock), nocopy: marker::NoCopy, } } } } impl<'a> RWLockWriteGuard<'a> { /// Consumes this write lock and converts it into a read lock. pub fn downgrade(self) -> RWLockReadGuard<'a> { let lock = self.lock; // Don't run the destructor of the write guard, we're in charge of // things from now on unsafe { mem::forget(self) } let old_count = lock.read_count.fetch_add(1, atomics::Release); // If another reader was already blocking, we need to hand-off // the "reader cloud" access lock to them. if old_count != 0 { // Guaranteed not to let another writer in, because // another reader was holding the order_lock. Hence they // must be the one to get the access_lock (because all // access_locks are acquired with order_lock held). See // the comment in write_cond for more justification. lock.access_lock.release(); } RWLockReadGuard { lock: lock } } } #[unsafe_destructor] impl<'a> Drop for RWLockWriteGuard<'a> { fn drop(&mut self) { self.lock.access_lock.release(); } } #[unsafe_destructor] impl<'a> Drop for RWLockReadGuard<'a> { fn drop(&mut self) { let old_count = self.lock.read_count.fetch_sub(1, atomics::Release); assert!(old_count > 0); if old_count == 1 { // Note: this release used to be outside of a locked access // to exclusive-protected state. 
If this code is ever // converted back to such (instead of using atomic ops), // this access MUST NOT go inside the exclusive access. self.lock.access_lock.release(); } } } /**************************************************************************** * Tests ****************************************************************************/ #[cfg(test)] mod tests { use std::prelude::*; use Arc; use super::{Semaphore, Mutex, RWLock, Condvar}; use std::mem; use std::result; use std::task; /************************************************************************ * Semaphore tests ************************************************************************/ #[test] fn test_sem_acquire_release() { let s = Semaphore::new(1); s.acquire(); s.release(); s.acquire(); } #[test] fn test_sem_basic() { let s = Semaphore::new(1); let _g = s.access(); } #[test] fn test_sem_as_mutex() { let s = Arc::new(Semaphore::new(1)); let s2 = s.clone(); task::spawn(proc() { let _g = s2.access(); for _ in range(0u, 5) { task::deschedule(); } }); let _g = s.access(); for _ in range(0u, 5) { task::deschedule(); } } #[test] fn test_sem_as_cvar() { /* Child waits and parent signals */ let (tx, rx) = channel(); let s = Arc::new(Semaphore::new(0)); let s2 = s.clone(); task::spawn(proc() { s2.acquire(); tx.send(()); }); for _ in range(0u, 5) { task::deschedule(); } s.release(); let _ = rx.recv(); /* Parent waits and child signals */ let (tx, rx) = channel(); let s = Arc::new(Semaphore::new(0)); let s2 = s.clone(); task::spawn(proc() { for _ in range(0u, 5) { task::deschedule(); } s2.release(); let _ = rx.recv(); }); s.acquire(); tx.send(()); } #[test] fn test_sem_multi_resource() { // Parent and child both get in the critical section at the same // time, and shake hands. let s = Arc::new(Semaphore::new(2)); let s2 = s.clone(); let (tx1, rx1) = channel(); let (tx2, rx2) = channel(); task::spawn(proc() { let _g = s2.access(); let _ = rx2.recv(); tx1.send(()); }); let _g = s.access(); tx2.send(()); let _ = rx1.recv(); } #[test] fn test_sem_runtime_friendly_blocking() { // Force the runtime to schedule two threads on the same sched_loop. // When one blocks, it should schedule the other one. let s = Arc::new(Semaphore::new(1)); let s2 = s.clone(); let (tx, rx) = channel(); { let _g = s.access(); task::spawn(proc() { tx.send(()); drop(s2.access()); tx.send(()); }); rx.recv(); // wait for child to come alive for _ in range(0u, 5) { task::deschedule(); } // let the child contend } rx.recv(); // wait for child to be done } /************************************************************************ * Mutex tests ************************************************************************/ #[test] fn test_mutex_lock() { // Unsafely achieve shared state, and do the textbook // "load tmp = move ptr; inc tmp; store ptr <- tmp" dance. 
let (tx, rx) = channel(); let m = Arc::new(Mutex::new()); let m2 = m.clone(); let mut sharedstate = box 0; { let ptr: *mut int = &mut *sharedstate; task::spawn(proc() { access_shared(ptr, &m2, 10); tx.send(()); }); } { access_shared(&mut *sharedstate, &m, 10); let _ = rx.recv(); assert_eq!(*sharedstate, 20); } fn access_shared(sharedstate: *mut int, m: &Arc<Mutex>, n: uint) { for _ in range(0u, n) { let _g = m.lock(); let oldval = unsafe { *sharedstate }; task::deschedule(); unsafe { *sharedstate = oldval + 1; } } } } #[test] fn test_mutex_cond_wait() { let m = Arc::new(Mutex::new()); // Child wakes up parent { let lock = m.lock(); let m2 = m.clone(); task::spawn(proc() { let lock = m2.lock(); let woken = lock.cond.signal(); assert!(woken); }); lock.cond.wait(); } // Parent wakes up child let (tx, rx) = channel(); let m3 = m.clone(); task::spawn(proc() { let lock = m3.lock(); tx.send(()); lock.cond.wait(); tx.send(()); }); rx.recv(); // Wait until child gets in the mutex { let lock = m.lock(); let woken = lock.cond.signal(); assert!(woken); } rx.recv(); // Wait until child wakes up } fn test_mutex_cond_broadcast_helper(num_waiters: uint) { let m = Arc::new(Mutex::new()); let mut rxs = Vec::new(); for _ in range(0u, num_waiters) { let mi = m.clone(); let (tx, rx) = channel(); rxs.push(rx); task::spawn(proc() { let lock = mi.lock(); tx.send(()); lock.cond.wait(); tx.send(()); }); } // wait until all children get in the mutex for rx in rxs.mut_iter() { rx.recv(); } { let lock = m.lock(); let num_woken = lock.cond.broadcast(); assert_eq!(num_woken, num_waiters); } // wait until all children wake up for rx in rxs.mut_iter() { rx.recv(); } } #[test] fn test_mutex_cond_broadcast() { test_mutex_cond_broadcast_helper(12); } #[test] fn test_mutex_cond_broadcast_none() { test_mutex_cond_broadcast_helper(0); } #[test] fn test_mutex_cond_no_waiter() { let m = Arc::new(Mutex::new()); let m2 = m.clone(); let _ = task::try(proc() { drop(m.lock()); }); let lock = m2.lock(); assert!(!lock.cond.signal()); } #[test] fn test_mutex_killed_simple() { use std::any::Any; // Mutex must get automatically unlocked if failed/killed within. let m = Arc::new(Mutex::new()); let m2 = m.clone(); let result: result::Result<(), Box<Any + Send>> = task::try(proc() { let _lock = m2.lock(); fail!(); }); assert!(result.is_err()); // child task must have finished by the time try returns drop(m.lock()); } #[test] fn test_mutex_cond_signal_on_0() { // Tests that signal_on(0) is equivalent to signal(). 
let m = Arc::new(Mutex::new()); let lock = m.lock(); let m2 = m.clone(); task::spawn(proc() { let lock = m2.lock(); lock.cond.signal_on(0); }); lock.cond.wait(); } #[test] fn test_mutex_no_condvars() { let result = task::try(proc() { let m = Mutex::new_with_condvars(0); m.lock().cond.wait(); }); assert!(result.is_err()); let result = task::try(proc() { let m = Mutex::new_with_condvars(0); m.lock().cond.signal(); }); assert!(result.is_err()); let result = task::try(proc() { let m = Mutex::new_with_condvars(0); m.lock().cond.broadcast(); }); assert!(result.is_err()); } /************************************************************************ * Reader/writer lock tests ************************************************************************/ #[cfg(test)] pub enum RWLockMode { Read, Write, Downgrade, DowngradeRead } #[cfg(test)] fn lock_rwlock_in_mode(x: &Arc<RWLock>, mode: RWLockMode, blk: ||) { match mode { Read => { let _g = x.read(); blk() } Write => { let _g = x.write(); blk() } Downgrade => { let _g = x.write(); blk() } DowngradeRead => { let _g = x.write().downgrade(); blk() } } } #[cfg(test)] fn test_rwlock_exclusion(x: Arc<RWLock>, mode1: RWLockMode, mode2: RWLockMode) { // Test mutual exclusion between readers and writers. Just like the // mutex mutual exclusion test, a ways above. let (tx, rx) = channel(); let x2 = x.clone(); let mut sharedstate = box 0; { let ptr: *const int = &*sharedstate; task::spawn(proc() { let sharedstate: &mut int = unsafe { mem::transmute(ptr) }; access_shared(sharedstate, &x2, mode1, 10); tx.send(()); }); } { access_shared(&mut *sharedstate, &x, mode2, 10); let _ = rx.recv(); assert_eq!(*sharedstate, 20); } fn access_shared(sharedstate: &mut int, x: &Arc<RWLock>, mode: RWLockMode, n: uint) { for _ in range(0u, n) { lock_rwlock_in_mode(x, mode, || { let oldval = *sharedstate; task::deschedule(); *sharedstate = oldval + 1; }) } } } #[test] fn test_rwlock_readers_wont_modify_the_data() { test_rwlock_exclusion(Arc::new(RWLock::new()), Read, Write); test_rwlock_exclusion(Arc::new(RWLock::new()), Write, Read); test_rwlock_exclusion(Arc::new(RWLock::new()), Read, Downgrade); test_rwlock_exclusion(Arc::new(RWLock::new()), Downgrade, Read); test_rwlock_exclusion(Arc::new(RWLock::new()), Write, DowngradeRead); test_rwlock_exclusion(Arc::new(RWLock::new()), DowngradeRead, Write); } #[test] fn test_rwlock_writers_and_writers() { test_rwlock_exclusion(Arc::new(RWLock::new()), Write, Write); test_rwlock_exclusion(Arc::new(RWLock::new()), Write, Downgrade); test_rwlock_exclusion(Arc::new(RWLock::new()), Downgrade, Write); test_rwlock_exclusion(Arc::new(RWLock::new()), Downgrade, Downgrade); } #[cfg(test)] fn test_rwlock_handshake(x: Arc<RWLock>, mode1: RWLockMode, mode2: RWLockMode, make_mode2_go_first: bool) { // Much like sem_multi_resource. let x2 = x.clone(); let (tx1, rx1) = channel(); let (tx2, rx2) = channel(); task::spawn(proc() { if !make_mode2_go_first { rx2.recv(); // parent sends to us once it locks, or ... } lock_rwlock_in_mode(&x2, mode2, || { if make_mode2_go_first { tx1.send(()); // ... we send to it once we lock } rx2.recv(); tx1.send(()); }) }); if make_mode2_go_first { rx1.recv(); // child sends to us once it locks, or ... } lock_rwlock_in_mode(&x, mode1, || { if !make_mode2_go_first { tx2.send(()); // ... 
we send to it once we lock } tx2.send(()); rx1.recv(); }) } #[test] fn test_rwlock_readers_and_readers() { test_rwlock_handshake(Arc::new(RWLock::new()), Read, Read, false); // The downgrader needs to get in before the reader gets in, otherwise // they cannot end up reading at the same time. test_rwlock_handshake(Arc::new(RWLock::new()), DowngradeRead, Read, false); test_rwlock_handshake(Arc::new(RWLock::new()), Read, DowngradeRead, true); // Two downgrade_reads can never both end up reading at the same time. } #[test] fn test_rwlock_downgrade_unlock() { // Tests that downgrade can unlock the lock in both modes let x = Arc::new(RWLock::new()); lock_rwlock_in_mode(&x, Downgrade, || { }); test_rwlock_handshake(x, Read, Read, false); let y = Arc::new(RWLock::new()); lock_rwlock_in_mode(&y, DowngradeRead, || { }); test_rwlock_exclusion(y, Write, Write); } #[test] fn test_rwlock_read_recursive() { let x = RWLock::new(); let _g1 = x.read(); let _g2 = x.read(); } #[test] fn test_rwlock_cond_wait() { // As test_mutex_cond_wait above. let x = Arc::new(RWLock::new()); // Child wakes up parent { let lock = x.write(); let x2 = x.clone(); task::spawn(proc() { let lock = x2.write(); assert!(lock.cond.signal()); }); lock.cond.wait(); } // Parent wakes up child let (tx, rx) = channel(); let x3 = x.clone(); task::spawn(proc() { let lock = x3.write(); tx.send(()); lock.cond.wait(); tx.send(()); }); rx.recv(); // Wait until child gets in the rwlock drop(x.read()); // Must be able to get in as a reader { let x = x.write(); assert!(x.cond.signal()); } rx.recv(); // Wait until child wakes up drop(x.read()); // Just for good measure } #[cfg(test)] fn test_rwlock_cond_broadcast_helper(num_waiters: uint) { // Much like the mutex broadcast test. Downgrade-enabled. fn lock_cond(x: &Arc<RWLock>, blk: |c: &Condvar|) { let lock = x.write(); blk(&lock.cond); } let x = Arc::new(RWLock::new()); let mut rxs = Vec::new(); for _ in range(0u, num_waiters) { let xi = x.clone(); let (tx, rx) = channel(); rxs.push(rx); task::spawn(proc() { lock_cond(&xi, |cond| { tx.send(()); cond.wait(); tx.send(()); }) }); } // wait until all children get in the mutex for rx in rxs.mut_iter() { let _ = rx.recv(); } lock_cond(&x, |cond| { let num_woken = cond.broadcast(); assert_eq!(num_woken, num_waiters); }); // wait until all children wake up for rx in rxs.mut_iter() { let _ = rx.recv(); } } #[test] fn test_rwlock_cond_broadcast() { test_rwlock_cond_broadcast_helper(0); test_rwlock_cond_broadcast_helper(12); } #[cfg(test)] fn rwlock_kill_helper(mode1: RWLockMode, mode2: RWLockMode) { use std::any::Any; // Mutex must get automatically unlocked if failed/killed within. 
let x = Arc::new(RWLock::new()); let x2 = x.clone(); let result: result::Result<(), Box<Any + Send>> = task::try(proc() { lock_rwlock_in_mode(&x2, mode1, || { fail!(); }) }); assert!(result.is_err()); // child task must have finished by the time try returns lock_rwlock_in_mode(&x, mode2, || { }) } #[test] fn test_rwlock_reader_killed_writer() { rwlock_kill_helper(Read, Write); } #[test] fn test_rwlock_writer_killed_reader() { rwlock_kill_helper(Write, Read); } #[test] fn test_rwlock_reader_killed_reader() { rwlock_kill_helper(Read, Read); } #[test] fn test_rwlock_writer_killed_writer() { rwlock_kill_helper(Write, Write); } #[test] fn test_rwlock_kill_downgrader() { rwlock_kill_helper(Downgrade, Read); rwlock_kill_helper(Read, Downgrade); rwlock_kill_helper(Downgrade, Write); rwlock_kill_helper(Write, Downgrade); rwlock_kill_helper(DowngradeRead, Read); rwlock_kill_helper(Read, DowngradeRead); rwlock_kill_helper(DowngradeRead, Write); rwlock_kill_helper(Write, DowngradeRead); rwlock_kill_helper(DowngradeRead, Downgrade); rwlock_kill_helper(DowngradeRead, Downgrade); rwlock_kill_helper(Downgrade, DowngradeRead); rwlock_kill_helper(Downgrade, DowngradeRead); } }
34.523381
97
0.522089
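// Illustrative usage sketch for the row above (not part of that file): the
// `Sem` count goes negative while tasks are blocked, so `Semaphore::new(2)`
// admits at most two concurrent holders. Era-appropriate API is assumed
// (`use std::task;`, `proc()` closures, `range`, and `Arc`/`Semaphore` from
// the sync crate), matching that file's own tests.
fn bounded_workers_sketch() {
    let s = Arc::new(Semaphore::new(2));
    for _ in range(0u, 5) {
        let s2 = s.clone();
        task::spawn(proc() {
            // Blocks whenever two guards are already live; releases on drop.
            let _g = s2.access();
            // ... at most two tasks execute this section at once ...
        });
    }
}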
7611871b0e760f797256d7708c7464730acd58cb
10299
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // xfail-fast: check-fast screws up repr paths #[feature(macro_rules)]; #[deny(warnings)]; use std::fmt; use std::rt::io::Decorator; use std::rt::io::mem::MemWriter; use std::rt::io; use std::rt::io::Writer; use std::str; struct A; struct B; #[fmt="foo"] impl fmt::Signed for A { fn fmt(_: &A, f: &mut fmt::Formatter) { f.buf.write("aloha".as_bytes()); } } impl fmt::Signed for B { fn fmt(_: &B, f: &mut fmt::Formatter) { f.buf.write("adios".as_bytes()); } } macro_rules! t(($a:expr, $b:expr) => { assert_eq!($a, $b.to_owned()) }) pub fn main() { // Make sure there's a poly formatter that takes anything t!(format!("{:?}", 1), "1"); t!(format!("{:?}", A), "A"); t!(format!("{:?}", ()), "()"); t!(format!("{:?}", @(~1, "foo")), "@(~1, \"foo\")"); // Various edge cases without formats t!(format!(""), ""); t!(format!("hello"), "hello"); t!(format!("hello \\{"), "hello {"); // default formatters should work t!(format!("{}", 1i), "1"); t!(format!("{}", 1i8), "1"); t!(format!("{}", 1i16), "1"); t!(format!("{}", 1i32), "1"); t!(format!("{}", 1i64), "1"); t!(format!("{}", 1u), "1"); t!(format!("{}", 1u8), "1"); t!(format!("{}", 1u16), "1"); t!(format!("{}", 1u32), "1"); t!(format!("{}", 1u64), "1"); t!(format!("{}", 1.0f32), "1"); t!(format!("{}", 1.0f64), "1"); t!(format!("{}", "a"), "a"); t!(format!("{}", ~"a"), "a"); t!(format!("{}", @"a"), "a"); t!(format!("{}", false), "false"); t!(format!("{}", 'a'), "a"); // At least exercise all the formats t!(format!("{:b}", true), "true"); t!(format!("{:c}", '☃'), "☃"); t!(format!("{:d}", 10), "10"); t!(format!("{:i}", 10), "10"); t!(format!("{:u}", 10u), "10"); t!(format!("{:o}", 10u), "12"); t!(format!("{:x}", 10u), "a"); t!(format!("{:X}", 10u), "A"); t!(format!("{:s}", "foo"), "foo"); t!(format!("{:s}", ~"foo"), "foo"); t!(format!("{:s}", @"foo"), "foo"); t!(format!("{:p}", 0x1234 as *int), "0x1234"); t!(format!("{:p}", 0x1234 as *mut int), "0x1234"); t!(format!("{:d}", A), "aloha"); t!(format!("{:d}", B), "adios"); t!(format!("foo {:s} ☃☃☃☃☃☃", "bar"), "foo bar ☃☃☃☃☃☃"); t!(format!("{1} {0}", 0, 1), "1 0"); t!(format!("{foo} {bar}", foo=0, bar=1), "0 1"); t!(format!("{foo} {1} {bar} {0}", 0, 1, foo=2, bar=3), "2 1 3 0"); t!(format!("{} {0}", "a"), "a a"); t!(format!("{foo_bar}", foo_bar=1), "1"); // Methods should probably work t!(format!("{0, plural, =1{a#} =2{b#} zero{c#} other{d#}}", 0u), "c0"); t!(format!("{0, plural, =1{a#} =2{b#} zero{c#} other{d#}}", 1u), "a1"); t!(format!("{0, plural, =1{a#} =2{b#} zero{c#} other{d#}}", 2u), "b2"); t!(format!("{0, plural, =1{a#} =2{b#} zero{c#} other{d#}}", 3u), "d3"); t!(format!("{0, select, a{a#} b{b#} c{c#} other{d#}}", "a"), "aa"); t!(format!("{0, select, a{a#} b{b#} c{c#} other{d#}}", "b"), "bb"); t!(format!("{0, select, a{a#} b{b#} c{c#} other{d#}}", "c"), "cc"); t!(format!("{0, select, a{a#} b{b#} c{c#} other{d#}}", "d"), "dd"); t!(format!("{1, select, a{#{0:s}} other{#}}", "b", "a"), "ab"); t!(format!("{1, select, a{#{0}} other{#}}", "c", "b"), "b"); // Formatting strings and their arguments t!(format!("{:s}", "a"), "a"); t!(format!("{:4s}", "a"), "a 
"); t!(format!("{:>4s}", "a"), " a"); t!(format!("{:<4s}", "a"), "a "); t!(format!("{:.4s}", "a"), "a"); t!(format!("{:4.4s}", "a"), "a "); t!(format!("{:4.4s}", "aaaaaaaaaaaaaaaaaa"), "aaaa"); t!(format!("{:<4.4s}", "aaaaaaaaaaaaaaaaaa"), "aaaa"); t!(format!("{:>4.4s}", "aaaaaaaaaaaaaaaaaa"), "aaaa"); t!(format!("{:>10.4s}", "aaaaaaaaaaaaaaaaaa"), "aaaa"); t!(format!("{:2.4s}", "aaaaa"), "aaaa"); t!(format!("{:2.4s}", "aaaa"), "aaaa"); t!(format!("{:2.4s}", "aaa"), "aaa"); t!(format!("{:2.4s}", "aa"), "aa"); t!(format!("{:2.4s}", "a"), "a "); t!(format!("{:0>2s}", "a"), "0a"); t!(format!("{:.*s}", 4, "aaaaaaaaaaaaaaaaaa"), "aaaa"); t!(format!("{:.1$s}", "aaaaaaaaaaaaaaaaaa", 4), "aaaa"); t!(format!("{:1$s}", "a", 4), "a "); t!(format!("{:-#s}", "a"), "a"); t!(format!("{:+#s}", "a"), "a"); // Formatting integers should select the right implementation based off the // type of the argument. Also, hex/octal/binary should be defined for // integers, but they shouldn't emit the negative sign. t!(format!("{:d}", -1i), "-1"); t!(format!("{:d}", -1i8), "-1"); t!(format!("{:d}", -1i16), "-1"); t!(format!("{:d}", -1i32), "-1"); t!(format!("{:d}", -1i64), "-1"); t!(format!("{:t}", 1i), "1"); t!(format!("{:t}", 1i8), "1"); t!(format!("{:t}", 1i16), "1"); t!(format!("{:t}", 1i32), "1"); t!(format!("{:t}", 1i64), "1"); t!(format!("{:x}", 1i), "1"); t!(format!("{:x}", 1i8), "1"); t!(format!("{:x}", 1i16), "1"); t!(format!("{:x}", 1i32), "1"); t!(format!("{:x}", 1i64), "1"); t!(format!("{:X}", 1i), "1"); t!(format!("{:X}", 1i8), "1"); t!(format!("{:X}", 1i16), "1"); t!(format!("{:X}", 1i32), "1"); t!(format!("{:X}", 1i64), "1"); t!(format!("{:o}", 1i), "1"); t!(format!("{:o}", 1i8), "1"); t!(format!("{:o}", 1i16), "1"); t!(format!("{:o}", 1i32), "1"); t!(format!("{:o}", 1i64), "1"); t!(format!("{:u}", 1u), "1"); t!(format!("{:u}", 1u8), "1"); t!(format!("{:u}", 1u16), "1"); t!(format!("{:u}", 1u32), "1"); t!(format!("{:u}", 1u64), "1"); t!(format!("{:t}", 1u), "1"); t!(format!("{:t}", 1u8), "1"); t!(format!("{:t}", 1u16), "1"); t!(format!("{:t}", 1u32), "1"); t!(format!("{:t}", 1u64), "1"); t!(format!("{:x}", 1u), "1"); t!(format!("{:x}", 1u8), "1"); t!(format!("{:x}", 1u16), "1"); t!(format!("{:x}", 1u32), "1"); t!(format!("{:x}", 1u64), "1"); t!(format!("{:X}", 1u), "1"); t!(format!("{:X}", 1u8), "1"); t!(format!("{:X}", 1u16), "1"); t!(format!("{:X}", 1u32), "1"); t!(format!("{:X}", 1u64), "1"); t!(format!("{:o}", 1u), "1"); t!(format!("{:o}", 1u8), "1"); t!(format!("{:o}", 1u16), "1"); t!(format!("{:o}", 1u32), "1"); t!(format!("{:o}", 1u64), "1"); // Test the flags for formatting integers t!(format!("{:3d}", 1), " 1"); t!(format!("{:>3d}", 1), " 1"); t!(format!("{:>+3d}", 1), " +1"); t!(format!("{:<3d}", 1), "1 "); t!(format!("{:#d}", 1), "1"); t!(format!("{:#x}", 10), "0xa"); t!(format!("{:#X}", 10), "0xA"); t!(format!("{:#5x}", 10), " 0xa"); t!(format!("{:#o}", 10), "0o12"); t!(format!("{:08x}", 10), "0000000a"); t!(format!("{:8x}", 10), " a"); t!(format!("{:<8x}", 10), "a "); t!(format!("{:>8x}", 10), " a"); t!(format!("{:#08x}", 10), "0x00000a"); t!(format!("{:08d}", -10), "-0000010"); t!(format!("{:x}", -1u8), "ff"); t!(format!("{:X}", -1u8), "FF"); t!(format!("{:t}", -1u8), "11111111"); t!(format!("{:o}", -1u8), "377"); t!(format!("{:#x}", -1u8), "0xff"); t!(format!("{:#X}", -1u8), "0xFF"); t!(format!("{:#t}", -1u8), "0b11111111"); t!(format!("{:#o}", -1u8), "0o377"); // Signed combinations t!(format!("{:+5d}", 1), " +1"); t!(format!("{:+5d}", -1), " -1"); t!(format!("{:05d}", 1), "00001"); 
t!(format!("{:05d}", -1), "-0001"); t!(format!("{:+05d}", 1), "+0001"); t!(format!("{:+05d}", -1), "-0001"); // Some float stuff t!(format!("{:f}", 1.0f32), "1"); t!(format!("{:f}", 1.0f64), "1"); t!(format!("{:.3f}", 1.0f64), "1.000"); t!(format!("{:10.3f}", 1.0f64), " 1.000"); t!(format!("{:+10.3f}", 1.0f64), " +1.000"); t!(format!("{:+10.3f}", -1.0f64), " -1.000"); // Escaping t!(format!("\\{"), "{"); t!(format!("\\}"), "}"); t!(format!("\\#"), "#"); t!(format!("\\\\"), "\\"); test_write(); test_print(); // make sure that format! doesn't move out of local variables let a = ~3; format!("{:?}", a); format!("{:?}", a); // make sure that format! doesn't cause spurious unused-unsafe warnings when // it's inside of an outer unsafe block unsafe { let a: int = ::std::cast::transmute(3u); format!("{}", a); } test_format_args(); // test that trailing commas are acceptable format!("{}", "test",); format!("{foo}", foo="test",); } // Basic test to make sure that we can invoke the `write!` macro with an // io::Writer instance. fn test_write() { let mut buf = MemWriter::new(); write!(&mut buf as &mut io::Writer, "{}", 3); { let w = &mut buf as &mut io::Writer; write!(w, "{foo}", foo=4); write!(w, "{:s}", "hello"); writeln!(w, "{}", "line"); writeln!(w, "{foo}", foo="bar"); } let s = str::from_utf8_owned(buf.inner()); t!(s, "34helloline\nbar\n"); } // Just make sure that the macros are defined, there's not really a lot that we // can do with them just yet (to test the output) fn test_print() { print!("hi"); print!("{:?}", ~[0u8]); println!("hello"); println!("this is a {}", "test"); println!("{foo}", foo="bar"); } // Just make sure that the macros are defined, there's not really a lot that we // can do with them just yet (to test the output) fn test_format_args() { let mut buf = MemWriter::new(); { let w = &mut buf as &mut io::Writer; format_args!(|args| { fmt::write(w, args) }, "{}", 1); format_args!(|args| { fmt::write(w, args) }, "test"); format_args!(|args| { fmt::write(w, args) }, "{test}", test=3); } let s = str::from_utf8_owned(buf.inner()); t!(s, "1test3"); let s = format_args!(fmt::format, "hello {}", "world"); t!(s, "hello world"); }
35.150171
80
0.448781
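// Illustrative sketch of the format-spec grammar the tests above exercise
// (same pre-1.0 `format!` syntax; not part of that file): in `{:<6.3s}`,
// `<` is the alignment, `6` is the minimum width (pads), `.3` is the
// precision (truncates), and `s` selects the string formatter.
fn spec_grammar_sketch() {
    assert_eq!(format!("{:<6.3s}", "abcdef"), "abc   ".to_owned());
    assert_eq!(format!("{:>6.3s}", "abcdef"), "   abc".to_owned());
}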
26e88f6a1b26649b020fc1db5379c78188b165ab
460
extern "C" { pub fn get_span_size() -> i64; pub fn get_ask_count() -> i64; pub fn get_min_count() -> i64; pub fn get_ans_count() -> i64; pub fn read_calldata(offset: i64) -> i64; pub fn set_return_data(offset: i64, len: i64); pub fn ask_external_data(eid: i64, did: i64, offset: i64, len: i64); pub fn get_external_data_status(eid: i64, vid: i64) -> i64; pub fn read_external_data(eid: i64, vid: i64, offset: i64) -> i64; }
38.333333
72
0.643478
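// Illustrative safe-wrapper sketch for the raw imports above (hypothetical,
// not part of that file): it assumes the common wasm convention that `offset`
// is a pointer into linear memory and `len` a byte count; the declarations
// themselves don't guarantee that.
pub fn set_return_data_safe(data: &[u8]) {
    unsafe { set_return_data(data.as_ptr() as i64, data.len() as i64) }
}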
fbe232e5e962289cda1ed798a73256207d17e5db
3251
//! Kraken models. use std::fmt::Debug; use rust_decimal::Decimal; use common::asset; /// Static check to ensure only kraken model inners are wrapped in outers. pub trait Inner { } #[derive(Debug, Clone, Deserialize, PartialEq, Eq)] #[serde(untagged)] pub enum TradeMatchItem { Text(String), Timestamp(Decimal), } #[derive(Debug, Clone, Deserialize)] pub enum Items { XXBTZUSD(Vec<Vec<TradeMatchItem>>), XETHZUSD(Vec<Vec<TradeMatchItem>>), XETHXXBT(Vec<Vec<TradeMatchItem>>), } impl Items { fn pair(&self) -> asset::Pair { match self { Items::XETHZUSD(_) => asset::ETH_USD, Items::XXBTZUSD(_) => asset::BTC_USD, Items::XETHXXBT(_) => asset::ETH_BTC, } } fn items(&self) -> &Vec<Vec<TradeMatchItem>> { match self { Items::XETHZUSD(ref items) => items, Items::XXBTZUSD(ref items) => items, Items::XETHXXBT(ref items) => items, } } } /// Trade history as [returned by kraken](https://www.kraken.com/en-us/help/api#get-recent-trades). #[derive(Debug, Clone, Deserialize)] pub struct TradeHistory { #[serde(flatten)] items: Items, last: String, } impl TradeHistory { pub fn new(items: Items, last: String) -> Self { TradeHistory { items, last, } } pub fn last(&self) -> &str { self.last.as_str() } pub fn pair(&self) -> asset::Pair { self.items.pair() } pub fn items(&self) -> &Vec<Vec<TradeMatchItem>> { self.items.items() } } impl Inner for TradeHistory { } /// Outer object that contains either an error or the result itself. #[derive(Debug, Clone, Deserialize)] pub struct Outer<T: Inner + Clone + Debug> { error: Vec<String>, result: Option<T>, } impl<T: Inner + Clone + Debug> Outer<T> { pub fn error(&self) -> &[String] { self.error.as_slice() } pub fn result(&self) -> Option<&T> { self.result.as_ref() } pub fn consume(self) -> (Vec<String>, Option<T>) { (self.error, self.result) } } #[cfg(test)] mod tests { use serde_json; use super::*; static TRADE_HISTORY_BTC_USD_JSON: &str = r##"{"error":[],"result":{"XXBTZUSD":[["6650.00000","0.00100000",1535271158.4026,"b","m",""],["6650.00000","0.19900000",1535271158.4217,"b","m",""],["6650.00000","0.10000000",1535271158.4299,"b","m",""]],"last":"1535290179989384853"}}"##; static TRADE_HISTORY_ETH_USD_JSON: &str = r##"{"error":[],"result":{"XETHZUSD":[["6650.00000","0.00100000",1535271158.4026,"b","m",""],["6650.00000","0.19900000",1535271158.4217,"b","m",""],["6650.00000","0.10000000",1535271158.4299,"b","m",""]],"last":"1535290179989384853"}}"##; #[test] fn deserialize_trade_history() { let history: Outer<TradeHistory> = serde_json::from_str(TRADE_HISTORY_BTC_USD_JSON) .expect("Failed to deserialize."); assert!(history.result().unwrap().last.as_str() == "1535290179989384853"); let history: Outer<TradeHistory> = serde_json::from_str(TRADE_HISTORY_ETH_USD_JSON) .expect("Failed to deserialize."); assert!(history.result().unwrap().last.as_str() == "1535290179989384853"); } }
28.025862
284
0.603507
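// Illustrative usage sketch for the kraken models above (assumes serde_json,
// which that file's tests already use): deserialize the outer envelope, then
// check the error list before touching the result.
fn parse_history_sketch(json: &str) -> Option<asset::Pair> {
    let outer: Outer<TradeHistory> = serde_json::from_str(json).ok()?;
    let (errors, result) = outer.consume();
    if !errors.is_empty() {
        return None; // kraken reported an API-level error
    }
    result.map(|history| history.pair())
}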
89444aaca1c05faf98df59d61d9c07eb5eb15528
3130
// // Copyright 2016 Andrew Hunter // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // //! //! # Pipes //! //! Pipes should be familiar to anyone who has used a UNIX-like operating system before. They //! connect the output of one component to the input of another. The tuple type `Pipe` is used //! to represent the pipe between two components. For example: //! //! ``` //! # use tametree::component::*; //! let add_one = component_fn(|x: &i32| { x+1 }); //! let add_two = component_fn(|x: &i32| { x+2 }); //! //! let add_three = Pipe(add_one, add_two); //! # //! # let mut endpoint = ComponentEndPoint::<i32, i32>::new(add_three); //! # endpoint.send(1); //! # assert!(endpoint.recv().unwrap() == 4); //! ``` //! //! This pipe can be used as a component: //! //! ``` //! # use tametree::component::*; //! # let add_one = component_fn(|x: &i32| { x+1 }); //! # let add_two = component_fn(|x: &i32| { x+2 }); //! # //! # let add_three = Pipe(add_one, add_two); //! # //! let mut endpoint = ComponentEndPoint::<i32, i32>::new(add_three); //! endpoint.send(1); //! assert!(endpoint.recv().unwrap() == 4); //! ``` //! //! use std::rc::*; use super::component::*; use super::immediate_publisher::*; struct Pipeline(ComponentRef, ComponentRef); impl Component for Pipeline { } impl Drop for Pipeline { fn drop(&mut self) { } } /// /// A component that takes the output of `TFirst` and connects it to the input of `TSecond` /// pub struct Pipe<TFirst: ConvertToComponent, TSecond: ConvertToComponent>(pub TFirst, pub TSecond); impl<TFirst: ConvertToComponent, TSecond: ConvertToComponent> ConvertToComponent for Pipe<TFirst, TSecond> { #[inline] fn into_component(self, consumer: ConsumerRef, publisher: PublisherRef) -> ComponentRef { let Pipe(first, second) = self; let pipeline_start = ImmediatePublisher::new(); let pipeline_end = pipeline_start.create_consumer(); let first_component = first.into_component(consumer, pipeline_start); let second_component = second.into_component(pipeline_end, publisher); Rc::new(Pipeline(first_component, second_component)) } } /* * TODO: would like to do this for function components as it's more efficient * but figuring out how to write the types so we don't get a conflict with the more generic version isn't easy * * use rustc_serialize::*; impl<TIn: 'static + DecodeFromTreeNode, TResult: Decodable + Encodable + EncodeToTreeNode + 'static, TOut: 'static + ToTreeNode> ConvertToComponent for Pipe<Box<Fn(&TIn) -> TResult>, Box<Fn(&TResult) -> TOut>> { ... } */
34.395604
148
0.671246
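// Illustrative sketch building on the doc-comment examples above: a `Pipe` is
// itself a component, so pipelines nest to arbitrary depth.
fn nested_pipe_sketch() {
    let add_one = component_fn(|x: &i32| { x + 1 });
    let add_two = component_fn(|x: &i32| { x + 2 });
    let add_three = component_fn(|x: &i32| { x + 3 });
    // Pipe(a, Pipe(b, c)) wires a's output into b, and b's output into c.
    let add_six = Pipe(add_one, Pipe(add_two, add_three));
    let mut endpoint = ComponentEndPoint::<i32, i32>::new(add_six);
    endpoint.send(1);
    assert!(endpoint.recv().unwrap() == 7);
}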
8969614b3e0df9af9f82a676f1637418c95762ac
790
use adder; mod common; // Files in subdirectories of the tests directory don’t get compiled as separate crates or have sections in the test output // tests/common/mod.rs is accessible from any of the integration test files as a module #[test] fn adds_two() { common::setup(); // do some shared initialization assert_eq!(4, adder::add_two(2)); } // for config examples, see: // https://joshleeb.com/posts/rust-integration-tests.html // see cargo.toml for integration config // usage: // # Run all tests // cargo test // # Run only unit tests // cargo test --lib // # Run only integration tests // cargo test --test integration // # Run only integration tests, single threaded // # (you’ll probably want this one) // cargo test --test integration -- --test-threads=1
29.259259
135
0.701266
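// Illustrative sketch of the layout the comments above describe (the helper
// body is hypothetical): placing the file at tests/common/mod.rs, rather than
// tests/common.rs, makes Cargo treat it as a shared module instead of its own
// integration-test crate.
//
// tests/common/mod.rs:
pub fn setup() {
    // shared initialization used by every integration test
}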
ac6f6f2a687879e906ee01b11ec5ad8b2a8a6cbe
861
//! This module implements the global `globalThis` property. //! //! The global globalThis property contains the global this value, //! which is akin to the global object. //! //! More information: //! - [MDN documentation][mdn] //! - [ECMAScript reference][spec] //! //! [spec]: https://tc39.es/ecma262/#sec-globalthis //! [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/globalThis use crate::{builtins::BuiltIn, Context, JsValue}; use boa_profiler::Profiler; #[cfg(test)] mod tests; /// The JavaScript `globalThis`. pub(crate) struct GlobalThis; impl BuiltIn for GlobalThis { const NAME: &'static str = "globalThis"; fn init(context: &mut Context) -> Option<JsValue> { let _timer = Profiler::global().start_event(Self::NAME, "init"); Some(context.global_object().clone().into()) } }
27.774194
102
0.68525
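// Illustrative sketch for the builtin above (boa's `Context::eval` is assumed;
// exact signatures vary between boa versions): after `GlobalThis::init` runs,
// scripts can reach the global object by name.
fn global_this_sketch(context: &mut Context) {
    let value = context.eval("globalThis").unwrap();
    assert!(value.is_object()); // the global this value is the global object
}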
ab308c4f3520101fedc1cc991a1581a6acce693e
4793
//! [PUT /_matrix/client/r0/rooms/{roomId}/send/{eventType}/{txnId}](https://matrix.org/docs/spec/client_server/r0.6.1#put-matrix-client-r0-rooms-roomid-send-eventtype-txnid) use ruma_api::{ruma_api, Metadata}; use ruma_events::{AnyMessageEventContent, EventContent as _}; use ruma_identifiers::{EventId, RoomId}; use ruma_serde::Outgoing; ruma_api! { metadata: { description: "Send a message event to a room.", method: PUT, name: "create_message_event", path: "/_matrix/client/r0/rooms/:room_id/send/:event_type/:txn_id", rate_limited: false, authentication: AccessToken, } response: { /// A unique identifier for the event. pub event_id: EventId, } error: crate::Error } /// Data for a request to the `send_message_event` API endpoint. /// /// Send a message event to a room. #[derive(Clone, Debug, Outgoing)] #[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)] #[incoming_derive(!Deserialize)] pub struct Request<'a> { /// The room to send the event to. pub room_id: &'a RoomId, /// The transaction ID for this event. /// /// Clients should generate an ID unique across requests with the /// same access token; it will be used by the server to ensure /// idempotency of requests. pub txn_id: &'a str, /// The event content to send. pub content: &'a AnyMessageEventContent, } impl<'a> Request<'a> { /// Creates a new `Request` with the given room id, transaction id and event content. pub fn new(room_id: &'a RoomId, txn_id: &'a str, content: &'a AnyMessageEventContent) -> Self { Self { room_id, txn_id, content } } } impl Response { /// Creates a new `Response` with the given event id. pub fn new(event_id: EventId) -> Self { Self { event_id } } } #[cfg(feature = "client")] impl<'a> ruma_api::OutgoingRequest for Request<'a> { type EndpointError = crate::Error; type IncomingResponse = Response; const METADATA: Metadata = METADATA; fn try_into_http_request( self, base_url: &str, access_token: Option<&str>, ) -> Result<http::Request<Vec<u8>>, ruma_api::error::IntoHttpError> { use http::header::{HeaderValue, AUTHORIZATION, CONTENT_TYPE}; use percent_encoding::{utf8_percent_encode, NON_ALPHANUMERIC}; let http_request = http::Request::builder() .method(http::Method::PUT) .uri(format!( "{}/_matrix/client/r0/rooms/{}/send/{}/{}", base_url.strip_suffix('/').unwrap_or(base_url), utf8_percent_encode(self.room_id.as_str(), NON_ALPHANUMERIC), utf8_percent_encode(self.content.event_type(), NON_ALPHANUMERIC), utf8_percent_encode(&self.txn_id, NON_ALPHANUMERIC), )) .header(CONTENT_TYPE, "application/json") .header( AUTHORIZATION, HeaderValue::from_str(&format!( "Bearer {}", access_token.ok_or(ruma_api::error::IntoHttpError::NeedsAuthentication)? 
))?, ) .body(serde_json::to_vec(&self.content)?)?; Ok(http_request) } } #[cfg(feature = "server")] impl ruma_api::IncomingRequest for IncomingRequest { type EndpointError = crate::Error; type OutgoingResponse = Response; const METADATA: Metadata = METADATA; fn try_from_http_request( request: http::Request<Vec<u8>>, ) -> Result<Self, ruma_api::error::FromHttpRequestError> { use std::convert::TryFrom; use ruma_api::try_deserialize; use serde_json::value::RawValue as RawJsonValue; let path_segments: Vec<&str> = request.uri().path()[1..].split('/').collect(); let room_id = { let decoded = try_deserialize!( request, percent_encoding::percent_decode(path_segments[4].as_bytes()).decode_utf8(), ); try_deserialize!(request, RoomId::try_from(&*decoded)) }; let txn_id = try_deserialize!( request, percent_encoding::percent_decode(path_segments[7].as_bytes()).decode_utf8(), ) .into_owned(); let content = { let request_body: Box<RawJsonValue> = try_deserialize!(request, serde_json::from_slice(request.body().as_slice())); let event_type = try_deserialize!( request, percent_encoding::percent_decode(path_segments[6].as_bytes()).decode_utf8() ); try_deserialize!(request, AnyMessageEventContent::from_parts(&event_type, request_body)) }; Ok(Self { room_id, txn_id, content }) } }
32.828767
174
0.615481
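// Illustrative client-side sketch for the endpoint above (the room id, event
// content, homeserver URL, and token are placeholders; requires the "client"
// feature that gates `OutgoingRequest`):
fn send_sketch(
    room_id: &RoomId,
    content: &AnyMessageEventContent,
) -> Result<http::Request<Vec<u8>>, ruma_api::error::IntoHttpError> {
    use ruma_api::OutgoingRequest;
    let request = Request::new(room_id, "txn-1", content);
    request.try_into_http_request("https://homeserver.example", Some("access-token"))
}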
097a43b526ccfa258275e28df659e2d33cfcd668
513
#[derive(QueryableByName, Debug)] pub struct WithCount<T> { #[sql_type = "::diesel::sql_types::BigInt"] total: i64, #[diesel(embed)] record: T, } pub trait WithCountExtension<T> { fn records_and_total(self) -> (Vec<T>, i64); } impl<T> WithCountExtension<T> for Vec<WithCount<T>> { fn records_and_total(self) -> (Vec<T>, i64) { let cnt = self.get(0).map(|row| row.total).unwrap_or(0); let vec = self.into_iter().map(|row| row.record).collect(); (vec, cnt) } }
25.65
67
0.608187
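// Illustrative usage sketch for `WithCount` above (the `Crate` row type and
// SQL text are hypothetical; assumes diesel 1.x with `sql_query`): load rows
// tagged with a window-function total, then split them with the extension
// trait.
fn paginated_sketch(conn: &PgConnection) -> QueryResult<(Vec<Crate>, i64)> {
    use diesel::{sql_query, RunQueryDsl};
    let rows: Vec<WithCount<Crate>> =
        sql_query("SELECT *, COUNT(*) OVER () AS total FROM crates LIMIT 10").load(conn)?;
    Ok(rows.records_and_total())
}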
28671c10a4c1387c0d225e0c759d8552f08b6cae
2077
// private sub-module defined in other files mod add_point_coordinates_to_table; mod clean_vector; mod convert_nodata_to_zero; mod convert_raster_format; mod csv_points_to_vector; mod export_table_to_csv; mod join_tables; mod lines_to_polygons; mod merge_table_with_csv; mod merge_vectors; mod modify_nodata_value; mod multipart_to_singlepart; mod new_raster; mod polygons_to_lines; mod print_geotiff_tags; mod raster_to_vector_lines; mod raster_to_vector_points; mod reinitialize_attribute_table; mod remove_polygon_holes; mod set_nodata_value; mod singlepart_to_multipart; mod vector_lines_to_raster; mod vector_points_to_raster; mod vector_polygons_to_raster; // exports identifiers from private sub-modules in the current module namespace pub use self::add_point_coordinates_to_table::AddPointCoordinatesToTable; pub use self::clean_vector::CleanVector; pub use self::convert_nodata_to_zero::ConvertNodataToZero; pub use self::convert_raster_format::ConvertRasterFormat; pub use self::csv_points_to_vector::CsvPointsToVector; pub use self::export_table_to_csv::ExportTableToCsv; pub use self::join_tables::JoinTables; pub use self::lines_to_polygons::LinesToPolygons; pub use self::merge_table_with_csv::MergeTableWithCsv; pub use self::merge_vectors::MergeVectors; pub use self::modify_nodata_value::ModifyNoDataValue; pub use self::multipart_to_singlepart::MultiPartToSinglePart; pub use self::new_raster::NewRasterFromBase; pub use self::polygons_to_lines::PolygonsToLines; pub use self::print_geotiff_tags::PrintGeoTiffTags; pub use self::raster_to_vector_lines::RasterToVectorLines; pub use self::raster_to_vector_points::RasterToVectorPoints; pub use self::reinitialize_attribute_table::ReinitializeAttributeTable; pub use self::remove_polygon_holes::RemovePolygonHoles; pub use self::set_nodata_value::SetNodataValue; pub use self::singlepart_to_multipart::SinglePartToMultiPart; pub use self::vector_lines_to_raster::VectorLinesToRaster; pub use self::vector_points_to_raster::VectorPointsToRaster; pub use self::vector_polygons_to_raster::VectorPolygonsToRaster;
39.942308
79
0.860857
89c811c9327f1870b2f9aaab4d75280f5f2cf5ec
2573
//! A very simple shader example. #[macro_use] extern crate gfx; extern crate cgmath; extern crate ggez; use ggez::event; use ggez::graphics::{self, DrawMode, Drawable}; use ggez::timer; use ggez::{Context, GameResult}; use std::env; use std::path; gfx_defines! { constant Dim { rate: f32 = "u_Rate", } } struct MainState { dim: Dim, shader: graphics::Shader<Dim>, } impl MainState { fn new(ctx: &mut Context) -> GameResult<MainState> { let dim = Dim { rate: 0.5 }; let shader = graphics::Shader::new( ctx, "/basic_150.glslv", "/dimmer_150.glslf", dim, "Dim", None, )?; Ok(MainState { dim, shader }) } } impl event::EventHandler for MainState { fn update(&mut self, ctx: &mut Context) -> GameResult { self.dim.rate = 0.5 + (((timer::ticks(ctx) as f32) / 100.0).cos() / 2.0); Ok(()) } fn draw(&mut self, ctx: &mut Context) -> GameResult { graphics::clear(ctx, [0.1, 0.2, 0.3, 1.0].into()); graphics::Mesh::new_circle( ctx, DrawMode::Fill, cgmath::Point2::new(100.0, 300.0), 100.0, 2.0, graphics::WHITE, )? .draw(ctx, (cgmath::Point2::new(0.0, 0.0),))?; { let _lock = graphics::use_shader(ctx, &self.shader); self.shader.send(ctx, self.dim)?; graphics::Mesh::new_circle( ctx, DrawMode::Fill, cgmath::Point2::new(400.0, 300.0), 100.0, 2.0, graphics::WHITE, )? .draw(ctx, (cgmath::Point2::new(0.0, 0.0),))?; } graphics::Mesh::new_circle( ctx, DrawMode::Fill, cgmath::Point2::new(700.0, 300.0), 100.0, 2.0, graphics::WHITE, )? .draw(ctx, (cgmath::Point2::new(0.0, 0.0),))?; graphics::present(ctx)?; Ok(()) } } pub fn main() -> GameResult { let resource_dir = if let Ok(manifest_dir) = env::var("CARGO_MANIFEST_DIR") { let mut path = path::PathBuf::from(manifest_dir); path.push("resources"); path } else { path::PathBuf::from("./resources") }; let cb = ggez::ContextBuilder::new("shader", "ggez").add_resource_path(resource_dir); let (ctx, event_loop) = &mut cb.build()?; let state = &mut MainState::new(ctx)?; event::run(ctx, event_loop, state) }
24.740385
89
0.502915
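// Illustrative sketch isolating the shader-scoping pattern from the example
// above (same API; this function itself is not part of that file):
// `use_shader` returns a lock guard, so the custom shader applies only to
// draws made while the guard is alive, and rendering reverts to the default
// shader when the guard drops.
fn draw_with_dimmer_sketch(
    ctx: &mut Context,
    shader: &graphics::Shader<Dim>,
    dim: Dim,
) -> GameResult {
    {
        let _lock = graphics::use_shader(ctx, shader);
        shader.send(ctx, dim)?; // upload the "Dim" uniform block ("u_Rate")
        // ... draws here go through dimmer_150.glslf ...
    }
    // ... draws here use the default pipeline again ...
    Ok(())
}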
6a747b04345834a62b12550dc4644b983af785e2
2124
use std::collections::BTreeMap; use super::ColumnDisplayInfo; use crate::style::ContentArrangement; use crate::table::Table; mod constraints; mod disabled; mod dynamic; mod helper; type DisplayInfos = BTreeMap<usize, ColumnDisplayInfo>; /// Determine the width of each column depending on the content of the given table. /// The result uses Option<usize>, since users can choose to hide columns. pub(crate) fn arrange_content(table: &Table) -> Vec<ColumnDisplayInfo> { let table_width = table.get_table_width().map(usize::from); let mut infos = BTreeMap::new(); let visible_columns = helper::count_visible_columns(&table.columns); for column in table.columns.iter() { if column.constraint.is_some() { constraints::evaluate(table, column, &mut infos, table_width, visible_columns); } } //println!("After initial constraints: {:#?}", infos); // Fallback to `ContentArrangement::Disabled`, if we don't have any information // on how wide the table should be. let table_width = match table_width { Some(table_width) => table_width, None => { disabled::arrange(&table, &mut infos, visible_columns); return infos.into_iter().map(|(_, info)| info).collect(); } }; match &table.arrangement { ContentArrangement::Disabled => disabled::arrange(&table, &mut infos, visible_columns), ContentArrangement::Dynamic | ContentArrangement::DynamicFullWidth => { dynamic::arrange(table, &mut infos, table_width); } } infos.into_iter().map(|(_, info)| info).collect() } #[cfg(test)] mod tests { use super::*; #[test] fn test_disabled_arrangement() { let mut table = Table::new(); table.set_header(&vec!["head", "head", "head"]); table.add_row(&vec!["__", "fivef", "sixsix"]); let display_infos = arrange_content(&table); // The width should be the width of the rows + padding let widths: Vec<u16> = display_infos.iter().map(|info| info.width()).collect(); assert_eq!(widths, vec![6, 7, 8]); } }
32.676923
95
0.646893
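// Illustrative sketch complementing the disabled-arrangement test above (the
// `set_table_width` setter name is an assumption, mirroring the
// `get_table_width` getter): once a width is known, the dynamic arranger runs
// instead of the disabled fallback and keeps columns within the budget.
fn dynamic_arrangement_sketch() {
    let mut table = Table::new();
    table.set_header(&vec!["head", "head"]);
    table.add_row(&vec!["some much longer cell content here", "short"]);
    table.set_table_width(40);
    let infos = arrange_content(&table);
    // Column widths may no longer exceed the 40-character table budget.
    let total: usize = infos.iter().map(|info| usize::from(info.width())).sum();
    assert!(total <= 40);
}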
fbd7c68665bdf759010d7fa75477f4a9113f314b
2901
// Copyright (c) The Libra Core Contributors // SPDX-License-Identifier: Apache-2.0 use libra_crypto::{ hash::{CryptoHash, CryptoHasher}, HashValue, }; use libra_crypto_derive::CryptoHasher; #[cfg(any(test, feature = "fuzzing"))] use proptest::{arbitrary::Arbitrary, prelude::*}; #[cfg(any(test, feature = "fuzzing"))] use proptest_derive::Arbitrary; use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize, CryptoHasher)] pub struct AccountStateBlob { blob: Vec<u8>, } impl AsRef<[u8]> for AccountStateBlob { fn as_ref(&self) -> &[u8] { &self.blob } } impl From<&AccountStateBlob> for Vec<u8> { fn from(account_state_blob: &AccountStateBlob) -> Vec<u8> { account_state_blob.blob.clone() } } impl From<AccountStateBlob> for Vec<u8> { fn from(account_state_blob: AccountStateBlob) -> Vec<u8> { Self::from(&account_state_blob) } } impl From<Vec<u8>> for AccountStateBlob { fn from(blob: Vec<u8>) -> AccountStateBlob { AccountStateBlob { blob } } } impl CryptoHash for AccountStateBlob { type Hasher = AccountStateBlobHasher; fn hash(&self) -> HashValue { let mut hasher = Self::Hasher::default(); hasher.update(&self.blob); hasher.finish() } } #[cfg(any(test, feature = "fuzzing"))] prop_compose! { fn account_state_blob_strategy()(account_resource in any::<AccountResource>(), balance_resource in any::<BalanceResource>()) -> AccountStateBlob { AccountStateBlob::try_from((&account_resource, &balance_resource)).unwrap() } } #[cfg(any(test, feature = "fuzzing"))] impl Arbitrary for AccountStateBlob { type Parameters = (); fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy { account_state_blob_strategy().boxed() } type Strategy = BoxedStrategy<Self>; } #[cfg(test)] mod tests { use super::*; use lcs::test_helpers::assert_canonical_encode_decode; use proptest::collection::vec; fn hash_blob(blob: &[u8]) -> HashValue { let mut hasher = AccountStateBlobHasher::default(); hasher.update(blob); hasher.finish() } proptest! { #[test] fn account_state_blob_hash(blob in vec(any::<u8>(), 1..100)) { prop_assert_eq!(hash_blob(&blob), AccountStateBlob::from(blob).hash()); } #[test] fn account_state_blob_lcs_roundtrip(account_state_blob in any::<AccountStateBlob>()) { assert_canonical_encode_decode(account_state_blob); } #[test] fn account_state_with_proof_lcs_roundtrip(account_state_with_proof in any::<AccountStateWithProof>()) { assert_canonical_encode_decode(account_state_with_proof); } } #[test] fn test_debug_does_not_panic() { format!("{:#?}", AccountStateBlob::from(vec![1u8, 2u8, 3u8])); } }
27.628571
150
0.655291
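// Illustrative sketch restating the property test above with fixed bytes (not
// part of that file): the `CryptoHash` impl is equivalent to hashing the raw
// blob bytes directly.
fn blob_hash_sketch() {
    let bytes = vec![1u8, 2u8, 3u8];
    let mut hasher = AccountStateBlobHasher::default();
    hasher.update(&bytes);
    assert_eq!(hasher.finish(), AccountStateBlob::from(bytes).hash());
}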
283adf3e15488824921b3a2324bbcbef54bb2ed2
12203
use std::fs::{self, File};
use std::str;
use tempfile;

use std::io::{self, Write};
use std::sync::Arc;

use rustls;

use rustls::internal::msgs::{codec::Codec, codec::Reader, message::Message};
use rustls::internal::pemfile;
use rustls::ProtocolVersion;
use rustls::Session;
use rustls::TLSError;
use rustls::{AllowAnyAuthenticatedClient, NoClientAuth, RootCertStore};
use rustls::{Certificate, PrivateKey};
use rustls::{ClientConfig, ClientSession};
use rustls::{ServerConfig, ServerSession};

#[cfg(feature = "dangerous_configuration")]
use rustls::{ClientCertVerified, ClientCertVerifier, DistinguishedNames};

use webpki;

macro_rules! embed_files {
    (
        $(
            ($name:ident, $keytype:expr, $path:expr);
        )+
    ) => {
        $(
            const $name: &'static [u8] = include_bytes!(
                concat!("../../../test-ca/", $keytype, "/", $path));
        )+

        pub fn bytes_for(keytype: &str, path: &str) -> &'static [u8] {
            match (keytype, path) {
                $(
                    ($keytype, $path) => $name,
                )+
                _ => panic!("unknown keytype {} with path {}", keytype, path),
            }
        }

        pub fn new_test_ca() -> tempfile::TempDir {
            let dir = tempfile::TempDir::new().unwrap();
            fs::create_dir(dir.path().join("ecdsa")).unwrap();
            fs::create_dir(dir.path().join("rsa")).unwrap();

            $(
                let mut f = File::create(dir.path().join($keytype).join($path)).unwrap();
                // write_all rather than write: a bare write may legally write only
                // part of the buffer, silently truncating the embedded test files.
                f.write_all($name).unwrap();
            )+

            dir
        }
    }
}

embed_files! {
    (ECDSA_CA_CERT, "ecdsa", "ca.cert");
    (ECDSA_CA_DER, "ecdsa", "ca.der");
    (ECDSA_CA_KEY, "ecdsa", "ca.key");
    (ECDSA_CLIENT_CERT, "ecdsa", "client.cert");
    (ECDSA_CLIENT_CHAIN, "ecdsa", "client.chain");
    (ECDSA_CLIENT_FULLCHAIN, "ecdsa", "client.fullchain");
    (ECDSA_CLIENT_KEY, "ecdsa", "client.key");
    (ECDSA_CLIENT_REQ, "ecdsa", "client.req");
    (ECDSA_END_CERT, "ecdsa", "end.cert");
    (ECDSA_END_CHAIN, "ecdsa", "end.chain");
    (ECDSA_END_FULLCHAIN, "ecdsa", "end.fullchain");
    (ECDSA_END_KEY, "ecdsa", "end.key");
    (ECDSA_END_REQ, "ecdsa", "end.req");
    (ECDSA_INTER_CERT, "ecdsa", "inter.cert");
    (ECDSA_INTER_KEY, "ecdsa", "inter.key");
    (ECDSA_INTER_REQ, "ecdsa", "inter.req");
    (ECDSA_NISTP256_PEM, "ecdsa", "nistp256.pem");
    (ECDSA_NISTP384_PEM, "ecdsa", "nistp384.pem");
    (RSA_CA_CERT, "rsa", "ca.cert");
    (RSA_CA_DER, "rsa", "ca.der");
    (RSA_CA_KEY, "rsa", "ca.key");
    (RSA_CLIENT_CERT, "rsa", "client.cert");
    (RSA_CLIENT_CHAIN, "rsa", "client.chain");
    (RSA_CLIENT_FULLCHAIN, "rsa", "client.fullchain");
    (RSA_CLIENT_KEY, "rsa", "client.key");
    (RSA_CLIENT_REQ, "rsa", "client.req");
    (RSA_CLIENT_RSA, "rsa", "client.rsa");
    (RSA_END_CERT, "rsa", "end.cert");
    (RSA_END_CHAIN, "rsa", "end.chain");
    (RSA_END_FULLCHAIN, "rsa", "end.fullchain");
    (RSA_END_KEY, "rsa", "end.key");
    (RSA_END_REQ, "rsa", "end.req");
    (RSA_END_RSA, "rsa", "end.rsa");
    (RSA_INTER_CERT, "rsa", "inter.cert");
    (RSA_INTER_KEY, "rsa", "inter.key");
    (RSA_INTER_REQ, "rsa", "inter.req");
}

pub fn transfer(left: &mut dyn Session, right: &mut dyn Session) -> usize {
    let mut buf = [0u8; 262144];
    let mut total = 0;

    while left.wants_write() {
        let sz = {
            let into_buf: &mut dyn io::Write = &mut &mut buf[..];
            left.write_tls(into_buf).unwrap()
        };
        total += sz;
        if sz == 0 {
            return total;
        }

        let mut offs = 0;
        loop {
            let from_buf: &mut dyn io::Read = &mut &buf[offs..sz];
            offs += right.read_tls(from_buf).unwrap();
            if sz == offs {
                break;
            }
        }
    }

    total
}

pub fn transfer_altered<F>(left: &mut dyn Session, filter: F, right: &mut dyn Session) -> usize
where
    F: Fn(&mut Message),
{
    let mut buf = [0u8; 262144];
    let mut total = 0;

    while left.wants_write() {
        let sz = {
            let into_buf: &mut dyn io::Write = &mut &mut buf[..];
            left.write_tls(into_buf).unwrap()
        };
        total += sz;
        if sz == 0 {
            return total;
        }

        let mut reader = Reader::init(&buf[..sz]);
        while reader.any_left() {
            let mut message = Message::read(&mut reader).unwrap();
            message.decode_payload();
            filter(&mut message);
            let message_enc = message.get_encoding();
            let message_enc_reader: &mut dyn io::Read = &mut &message_enc[..];
            let len = right.read_tls(message_enc_reader).unwrap();
            assert_eq!(len, message_enc.len());
        }
    }

    total
}

#[derive(Clone, Copy)]
pub enum KeyType {
    RSA,
    ECDSA,
}

pub static ALL_KEY_TYPES: [KeyType; 2] = [KeyType::RSA, KeyType::ECDSA];

impl KeyType {
    fn bytes_for(&self, part: &str) -> &'static [u8] {
        match self {
            KeyType::RSA => bytes_for("rsa", part),
            KeyType::ECDSA => bytes_for("ecdsa", part),
        }
    }

    pub fn get_chain(&self) -> Vec<Certificate> {
        pemfile::certs(&mut io::BufReader::new(self.bytes_for("end.fullchain"))).unwrap()
    }

    pub fn get_key(&self) -> PrivateKey {
        pemfile::pkcs8_private_keys(&mut io::BufReader::new(self.bytes_for("end.key"))).unwrap()[0]
            .clone()
    }

    fn get_client_chain(&self) -> Vec<Certificate> {
        pemfile::certs(&mut io::BufReader::new(self.bytes_for("client.fullchain"))).unwrap()
    }

    fn get_client_key(&self) -> PrivateKey {
        pemfile::pkcs8_private_keys(&mut io::BufReader::new(self.bytes_for("client.key"))).unwrap()
            [0]
            .clone()
    }
}

pub fn make_server_config(kt: KeyType) -> ServerConfig {
    let mut cfg = ServerConfig::new(NoClientAuth::new());
    cfg.set_single_cert(kt.get_chain(), kt.get_key()).unwrap();
    cfg
}

pub fn get_client_root_store(kt: KeyType) -> RootCertStore {
    let roots = kt.get_chain();
    let mut client_auth_roots = RootCertStore::empty();
    for root in roots {
        client_auth_roots.add(&root).unwrap();
    }
    client_auth_roots
}

pub fn make_server_config_with_mandatory_client_auth(kt: KeyType) -> ServerConfig {
    let client_auth_roots = get_client_root_store(kt);
    let client_auth = AllowAnyAuthenticatedClient::new(client_auth_roots);
    let mut cfg = ServerConfig::new(NoClientAuth::new());
    cfg.set_client_certificate_verifier(client_auth);
    cfg.set_single_cert(kt.get_chain(), kt.get_key()).unwrap();
    cfg
}

pub fn make_client_config(kt: KeyType) -> ClientConfig {
    let mut cfg = ClientConfig::new();
    let mut rootbuf = io::BufReader::new(kt.bytes_for("ca.cert"));
    cfg.root_store.add_pem_file(&mut rootbuf).unwrap();
    cfg
}

pub fn make_client_config_with_auth(kt: KeyType) -> ClientConfig {
    let mut cfg = make_client_config(kt);
    cfg.set_single_client_cert(kt.get_client_chain(), kt.get_client_key())
        .unwrap();
    cfg
}

pub fn make_pair(kt: KeyType) -> (ClientSession, ServerSession) {
    make_pair_for_configs(make_client_config(kt), make_server_config(kt))
}

pub fn make_pair_for_configs(
    client_config: ClientConfig,
    server_config: ServerConfig,
) -> (ClientSession, ServerSession) {
    make_pair_for_arc_configs(&Arc::new(client_config), &Arc::new(server_config))
}

pub fn make_pair_for_arc_configs(
    client_config: &Arc<ClientConfig>,
    server_config: &Arc<ServerConfig>,
) -> (ClientSession, ServerSession) {
    (
        ClientSession::new(client_config, dns_name("localhost")),
        ServerSession::new(server_config),
    )
}

pub fn do_handshake(client: &mut ClientSession, server: &mut ServerSession) -> (usize, usize) {
    let (mut to_client, mut to_server) = (0, 0);
    while server.is_handshaking() || client.is_handshaking() {
        to_server += transfer(client, server);
        server.process_new_packets().unwrap();
        to_client += transfer(server, client);
        client.process_new_packets().unwrap();
    }
    (to_server, to_client)
}

pub struct AllClientVersions {
    client_config: ClientConfig,
    index: usize,
}

impl AllClientVersions {
    pub fn new(client_config: ClientConfig) -> AllClientVersions {
        AllClientVersions {
            client_config,
            index: 0,
        }
    }
}

impl Iterator for AllClientVersions {
    type Item = ClientConfig;

    fn next(&mut self) -> Option<ClientConfig> {
        let mut config = self.client_config.clone();
        self.index += 1;

        match self.index {
            1 => {
                config.versions = vec![ProtocolVersion::TLSv1_2];
                Some(config)
            }
            2 => {
                config.versions = vec![ProtocolVersion::TLSv1_3];
                Some(config)
            }
            _ => None,
        }
    }
}

#[cfg(feature = "dangerous_configuration")]
pub struct MockClientVerifier {
    pub verified: fn() -> Result<ClientCertVerified, TLSError>,
    pub subjects: Option<DistinguishedNames>,
    pub mandatory: Option<bool>,
}

#[cfg(feature = "dangerous_configuration")]
impl ClientCertVerifier for MockClientVerifier {
    fn client_auth_mandatory(&self, sni: Option<&webpki::DNSName>) -> Option<bool> {
        // This is just an added 'test' to make sure we plumb through the SNI;
        // although it's valid for it to be None, our tests should (as of now)
        // always provide it.
        assert!(sni.is_some());
        self.mandatory
    }

    fn client_auth_root_subjects(
        &self,
        sni: Option<&webpki::DNSName>,
    ) -> Option<DistinguishedNames> {
        assert!(sni.is_some());
        self.subjects.as_ref().cloned()
    }

    fn verify_client_cert(
        &self,
        _presented_certs: &[Certificate],
        sni: Option<&webpki::DNSName>,
    ) -> Result<ClientCertVerified, TLSError> {
        assert!(sni.is_some());
        (self.verified)()
    }
}

#[derive(PartialEq, Debug)]
pub enum TLSErrorFromPeer {
    Client(TLSError),
    Server(TLSError),
}

pub fn do_handshake_until_error(
    client: &mut ClientSession,
    server: &mut ServerSession,
) -> Result<(), TLSErrorFromPeer> {
    while server.is_handshaking() || client.is_handshaking() {
        transfer(client, server);
        server
            .process_new_packets()
            .map_err(|err| TLSErrorFromPeer::Server(err))?;
        transfer(server, client);
        client
            .process_new_packets()
            .map_err(|err| TLSErrorFromPeer::Client(err))?;
    }
    Ok(())
}

pub fn do_handshake_until_both_error(
    client: &mut ClientSession,
    server: &mut ServerSession,
) -> Result<(), Vec<TLSErrorFromPeer>> {
    match do_handshake_until_error(client, server) {
        Err(server_err @ TLSErrorFromPeer::Server(_)) => {
            let mut errors = vec![server_err];
            transfer(server, client);
            let client_err = client
                .process_new_packets()
                .map_err(|err| TLSErrorFromPeer::Client(err))
                .expect_err("client didn't produce error after server error");
            errors.push(client_err);
            Err(errors)
        }
        Err(client_err @ TLSErrorFromPeer::Client(_)) => {
            let mut errors = vec![client_err];
            transfer(client, server);
            let server_err = server
                .process_new_packets()
                .map_err(|err| TLSErrorFromPeer::Server(err))
                .expect_err("server didn't produce error after client error");
            errors.push(server_err);
            Err(errors)
        }
        Ok(()) => Ok(()),
    }
}

pub fn dns_name(name: &'static str) -> webpki::DNSNameRef<'_> {
    webpki::DNSNameRef::try_from_ascii_str(name).unwrap()
}

pub struct FailsReads {
    errkind: io::ErrorKind,
}

impl FailsReads {
    pub fn new(errkind: io::ErrorKind) -> FailsReads {
        FailsReads { errkind }
    }
}

impl io::Read for FailsReads {
    fn read(&mut self, _b: &mut [u8]) -> io::Result<usize> {
        Err(io::Error::from(self.errkind))
    }
}
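// A minimal usage sketch (not part of the original helpers; rustls 0.19-era
// Session API assumed): build a client/server pair, pump the handshake with
// the helpers above, then move application data through `transfer`.
#[cfg(test)]
mod usage_sketch {
    use super::*;
    use std::io::Write;

    #[test]
    fn handshake_then_plaintext() {
        let (mut client, mut server) = make_pair(KeyType::RSA);
        do_handshake(&mut client, &mut server);

        // Plaintext written on the client side becomes TLS records that
        // `transfer` shuttles across to the server.
        client.write_all(b"hello").unwrap();
        transfer(&mut client, &mut server);
        server.process_new_packets().unwrap();
    }
}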
29.547215
104
0.596001
2698cbf0d5e388879601d1809035b7c0d623f20c
3,479
#![feature(min_specialization)]

#[cfg(feature = "psp22")]
#[brush::contract]
mod psp22_wrapper {
    use contracts::psp22::extensions::wrapper::*;
    use ink_lang as ink;

    #[ink(storage)]
    #[derive(Default, PSP22Storage, PSP22WrapperStorage)]
    pub struct PSP22WrapperStruct {
        #[PSP22StorageField]
        psp22: PSP22Data,
        #[PSP22WrapperStorageField]
        wrapper: PSP22WrapperData,
        contract_balance: Balance,
    }

    impl PSP22 for PSP22WrapperStruct {}

    /// We will override cross-contract wrapper calls in tests
    /// The cross-contract interaction will be tested in integration tests
    impl PSP22WrapperInternal for PSP22WrapperStruct {
        fn _deposit(&mut self, amount: Balance) -> Result<(), PSP22Error> {
            self.contract_balance += amount;
            Ok(())
        }

        fn _withdraw(&mut self, _account: AccountId, amount: Balance) -> Result<(), PSP22Error> {
            self.contract_balance -= amount;
            Ok(())
        }

        fn _underlying_balance(&mut self) -> Balance {
            self.contract_balance
        }
    }

    impl PSP22Wrapper for PSP22WrapperStruct {}

    impl PSP22WrapperStruct {
        #[ink(constructor)]
        pub fn new(underlying: AccountId) -> Self {
            let mut instance = Self::default();
            instance._init(underlying);
            instance
        }

        #[ink(message)]
        pub fn recover(&mut self) -> Result<Balance, PSP22Error> {
            self._recover(self.env().caller())
        }

        #[ink(message)]
        pub fn burn(&mut self, amount: Balance) -> Result<(), PSP22Error> {
            self._burn(self.env().caller(), amount)
        }
    }

    #[ink::test]
    fn deposit_for_works() {
        let accounts = ink_env::test::default_accounts::<ink_env::DefaultEnvironment>()
            .expect("Cannot get accounts");
        let mut wrapper = PSP22WrapperStruct::new(AccountId::from([0x1; 32]));

        assert_eq!(wrapper.balance_of(accounts.alice), 0);
        assert_eq!(wrapper.total_supply(), 0);
        assert!(wrapper.deposit_for(accounts.alice, 100).is_ok());
        assert_eq!(wrapper.balance_of(accounts.alice), 100);
        assert_eq!(wrapper.total_supply(), 100);
    }

    #[ink::test]
    fn withdraw_to_works() {
        let accounts = ink_env::test::default_accounts::<ink_env::DefaultEnvironment>()
            .expect("Cannot get accounts");
        let mut wrapper = PSP22WrapperStruct::new(AccountId::from([0x1; 32]));
        assert!(wrapper.deposit_for(accounts.alice, 100).is_ok());
        assert_eq!(wrapper.balance_of(accounts.alice), 100);
        assert_eq!(wrapper.total_supply(), 100);

        assert!(wrapper.withdraw_to(accounts.alice, 100).is_ok());
        assert_eq!(wrapper.balance_of(accounts.alice), 0);
        assert_eq!(wrapper.total_supply(), 0);
    }

    #[ink::test]
    fn recover_works() {
        let accounts = ink_env::test::default_accounts::<ink_env::DefaultEnvironment>()
            .expect("Cannot get accounts");
        let mut wrapper = PSP22WrapperStruct::new(AccountId::from([0x1; 32]));
        assert!(wrapper.deposit_for(accounts.alice, 100).is_ok());
        assert!(wrapper.burn(100).is_ok());
        assert_eq!(wrapper.balance_of(accounts.alice), 0);
        assert_eq!(wrapper.total_supply(), 0);

        assert!(wrapper.recover().is_ok());
        assert_eq!(wrapper.balance_of(accounts.alice), 100);
        assert_eq!(wrapper.total_supply(), 100);
    }
}
33.776699
118
0.622018
bf5e4254384d7eeaeb6430fd9cb50fdfda05e2f0
1,462
use super::*;

#[automatically_generated_function]
/// Returns whether the given node name respects the Drosophila gross anatomy node name pattern.
///
/// # Arguments
/// * `node_name`: &str - Node name to check pattern with.
///
/// # Example
/// To validate a node you can use:
/// ```ignore
/// # use graph::*;
/// let this_library_node_name = "FBbt:00002671PHENOTYPE";
/// let not_this_library_node_name = "PizzaQuattroStagioni";
/// assert!(is_valid_drosophila_gross_anatomy_node_name(this_library_node_name));
/// assert!(!is_valid_drosophila_gross_anatomy_node_name(not_this_library_node_name));
/// ```
pub fn is_valid_drosophila_gross_anatomy_node_name(node_name: &str) -> bool {
    is_valid_node_name_from_seeds(
        node_name,
        Some(&["FBBT"]),
        None,
        Some(":"),
        None,
        None,
        None,
    )
    .is_ok()
}

#[automatically_generated_function]
/// Returns URL from given Drosophila gross anatomy node name.
///
/// # Arguments
/// * `node_name`: &str - Node name to check pattern with.
///
/// # Safety
/// This method assumes that the provided node name is a Drosophila gross anatomy node name and
/// may cause a panic if the aforementioned assumption is not true.
///
pub(crate) unsafe fn format_drosophila_gross_anatomy_url_from_node_name(node_name: &str) -> String {
    format_url_from_node_name(
        "http://purl.obolibrary.org/obo/FBbt_{node_name}",
        node_name,
        Some(":"),
    )
}
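// A minimal usage sketch (hypothetical, not part of the generated code):
// validate a name with the safe checker before calling the unsafe URL
// formatter, so the formatter's precondition is guaranteed to hold.
#[cfg(test)]
mod drosophila_url_sketch {
    use super::*;

    #[test]
    fn validated_names_format_to_urls() {
        let node_name = "FBbt:00002671PHENOTYPE";
        if is_valid_drosophila_gross_anatomy_node_name(node_name) {
            // Safety: guarded by the validity check just above.
            let url = unsafe { format_drosophila_gross_anatomy_url_from_node_name(node_name) };
            assert!(url.contains("purl.obolibrary.org"));
        }
    }
}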
30.458333
100
0.687415
7974667d30786da789475927bf18cf5a40747e64
1,382
#[cfg(test)]
#[path = "../../tests/unit/termination/max_generation_test.rs"]
mod max_generation_test;

use super::*;
use std::marker::PhantomData;

/// A termination criterion which is in the terminated state once the maximum number of
/// generations is exceeded.
pub struct MaxGeneration<C, O, S>
where
    C: HeuristicContext<Objective = O, Solution = S>,
    O: HeuristicObjective<Solution = S>,
    S: HeuristicSolution,
{
    limit: usize,
    _marker: (PhantomData<C>, PhantomData<O>, PhantomData<S>),
}

impl<C, O, S> MaxGeneration<C, O, S>
where
    C: HeuristicContext<Objective = O, Solution = S>,
    O: HeuristicObjective<Solution = S>,
    S: HeuristicSolution,
{
    /// Creates a new instance of `MaxGeneration`.
    pub fn new(limit: usize) -> Self {
        Self { limit, _marker: (Default::default(), Default::default(), Default::default()) }
    }
}

impl<C, O, S> Termination for MaxGeneration<C, O, S>
where
    C: HeuristicContext<Objective = O, Solution = S>,
    O: HeuristicObjective<Solution = S>,
    S: HeuristicSolution,
{
    type Context = C;
    type Objective = O;

    fn is_termination(&self, heuristic_ctx: &mut Self::Context) -> bool {
        heuristic_ctx.statistics().generation >= self.limit
    }

    fn estimate(&self, heuristic_ctx: &Self::Context) -> f64 {
        (heuristic_ctx.statistics().generation as f64 / self.limit as f64).min(1.)
    }
}
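// A minimal sketch (hypothetical, avoiding the heuristic trait machinery):
// `estimate` above reduces to `generation / limit` clamped to 1.0, which is
// what this standalone test reproduces.
#[cfg(test)]
mod estimate_sketch {
    #[test]
    fn estimate_grows_linearly_and_clamps() {
        let limit = 200usize;
        let estimate = |generation: usize| (generation as f64 / limit as f64).min(1.);
        assert_eq!(estimate(50), 0.25);
        assert_eq!(estimate(200), 1.0);
        assert_eq!(estimate(500), 1.0); // past the limit it stays clamped
    }
}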
28.791667
103
0.664255
6a021b982f87072372c5a58ce3734f6633659df2
904
// Copyright 2019-2022 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT

use super::errors::Error;
use crate::{de::DeserializeOwned, from_slice, ser::Serialize, to_vec};
use cid::{Cid, Code::Blake2b256};

/// Cbor utility functions for serializable objects
pub trait Cbor: Serialize + DeserializeOwned {
    /// Marshals a cbor-encodable object into cbor bytes
    fn marshal_cbor(&self) -> Result<Vec<u8>, Error> {
        Ok(to_vec(&self)?)
    }

    /// Unmarshals cbor-encoded bytes into an object
    fn unmarshal_cbor(bz: &[u8]) -> Result<Self, Error> {
        Ok(from_slice(bz)?)
    }

    /// Returns the content identifier of the raw block of data.
    /// Default is a Blake2b256 hash.
    fn cid(&self) -> Result<Cid, Error> {
        Ok(cid::new_from_cbor(&self.marshal_cbor()?, Blake2b256))
    }
}

impl<T> Cbor for Vec<T> where T: Cbor {}
impl<T> Cbor for Option<T> where T: Cbor {}
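// A minimal usage sketch (hypothetical type, not part of this module): any
// type that is Serialize + DeserializeOwned opts in with an empty impl and
// gets marshal/unmarshal/cid for free.
//
//     #[derive(Serialize, Deserialize)]
//     struct BlockHeader {
//         height: u64,
//     }
//     impl Cbor for BlockHeader {}
//
//     let header = BlockHeader { height: 42 };
//     let bytes = header.marshal_cbor()?;              // object -> cbor bytes
//     let back = BlockHeader::unmarshal_cbor(&bytes)?; // cbor bytes -> object
//     let cid = header.cid()?;                         // Blake2b256 CID of those bytes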
31.172414
70
0.664823
eb8273576e6b52ef8e61ca3efc3293a68e307b7a
2,049
mod client;
pub mod key;
pub mod secret;

pub use client::KeyClient;
pub use secret::RecoveryLevel;

#[non_exhaustive]
#[derive(thiserror::Error, Debug)]
pub enum Error {
    #[error("Key Vault does not exist, or is unreachable at '{keyvault_name:?}.vault.azure.net'")]
    KeyVaultDoesNotExist { keyvault_name: String },

    #[error("Azure Active Directory authorization error")]
    Authorization,

    #[error("Received an error accessing the Key Vault, which could not be parsed as expected.")]
    UnparsableError,

    #[error(transparent)]
    Reqwest(#[from] reqwest::Error),

    #[error("Key Vault Error: {0}")]
    General(String),

    #[error("Failed to parse response from Key Vault: {0}")]
    SerdeParse(#[from] serde_json::Error),

    #[error("Could not get vault domain")]
    DomainParse,

    #[error("URL parse error: {0}")]
    UrlParseError(#[from] url::ParseError),

    #[error("Failed to parse response from Key Vault when backing up secret {}, response body: {}, error: {}", secret_name, response_body, error)]
    BackupSecretParseError {
        error: serde_json::Error,
        secret_name: String,
        response_body: String,
    },
}

#[cfg(test)]
mod tests {
    use azure_core::{TokenCredential, TokenResponse};
    use chrono::{Duration, Utc};
    use oauth2::AccessToken;

    #[macro_export]
    macro_rules! mock_client {
        ($keyvault_name:expr, $creds:expr, ) => {{
            KeyClient {
                vault_url: url::Url::parse(&mockito::server_url()).unwrap(),
                endpoint: "".to_string(),
                token_credential: $creds,
                token: None,
            }
        }};
    }

    pub(crate) struct MockCredential;

    #[async_trait::async_trait]
    impl TokenCredential for MockCredential {
        async fn get_token(&self, _resource: &str) -> Result<TokenResponse, azure_core::Error> {
            Ok(TokenResponse::new(
                AccessToken::new("TOKEN".to_owned()),
                Utc::now() + Duration::days(14),
            ))
        }
    }
}
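// A minimal sketch (hypothetical, mirroring the mockito-based tests above) of
// how the pieces combine: MockCredential stands in for Azure AD, and the
// mock_client! macro aims a KeyClient at the local mockito server.
//
//     let creds = MockCredential;
//     let client = mock_client!("test-keyvault".to_string(), &creds,);
//     // requests issued through `client` now hit mockito::server_url()
//     // instead of https://test-keyvault.vault.azure.net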
28.068493
146
0.612494
edf6f7bf0cfadf41f484c507ca7c4732d539dfe9
22,842
use crate::{Arch, Ctx, Error, Path, PathBuf, PayloadKind, Variant};
use anyhow::Context as _;
use rayon::prelude::*;
use std::collections::BTreeMap;

pub struct SplatConfig {
    pub include_debug_libs: bool,
    pub include_debug_symbols: bool,
    pub disable_symlinks: bool,
    pub preserve_ms_arch_notation: bool,
    pub output: PathBuf,
    pub copy: bool,
    //pub isolated: bool,
}

/// There is a massive amount of duplication between SDK headers for the Desktop
/// and Store variants, so we keep track of them to ensure we only splat each
/// unique file once
pub(crate) struct SdkHeaders {
    pub(crate) inner: BTreeMap<u64, PathBuf>,
    pub(crate) root: PathBuf,
}

impl SdkHeaders {
    fn new(root: PathBuf) -> Self {
        Self {
            inner: BTreeMap::new(),
            root,
        }
    }

    #[inline]
    fn get_relative_path<'path>(&self, path: &'path Path) -> anyhow::Result<&'path Path> {
        let mut rel = path.strip_prefix(&self.root)?;

        // Skip the first directory, which directly follows the "include", as it
        // is the one that includes are actually relative to
        if let Some(first) = rel.iter().next() {
            rel = rel.strip_prefix(first)?;
        }

        Ok(rel)
    }
}

pub(crate) struct SplatRoots {
    crt: PathBuf,
    sdk: PathBuf,
    src: PathBuf,
}

pub(crate) fn prep_splat(
    ctx: std::sync::Arc<Ctx>,
    config: &SplatConfig,
) -> Result<SplatRoots, Error> {
    let crt_root = config.output.join("crt");
    let sdk_root = config.output.join("sdk");

    if crt_root.exists() {
        std::fs::remove_dir_all(&crt_root)
            .with_context(|| format!("unable to delete existing CRT directory {}", crt_root))?;
    }

    if sdk_root.exists() {
        std::fs::remove_dir_all(&sdk_root)
            .with_context(|| format!("unable to delete existing SDK directory {}", sdk_root))?;
    }

    std::fs::create_dir_all(&crt_root)
        .with_context(|| format!("unable to create CRT directory {}", crt_root))?;
    std::fs::create_dir_all(&sdk_root)
        .with_context(|| format!("unable to create SDK directory {}", sdk_root))?;

    let src_root = ctx.work_dir.join("unpack");

    Ok(SplatRoots {
        crt: crt_root,
        sdk: sdk_root,
        src: src_root,
    })
}

pub(crate) fn splat(
    config: &SplatConfig,
    roots: &SplatRoots,
    item: &crate::WorkItem,
    tree: crate::unpack::FileTree,
    arches: u32,
    variants: u32,
) -> Result<Option<SdkHeaders>, Error> {
    struct Mapping<'ft> {
        src: PathBuf,
        target: PathBuf,
        tree: &'ft crate::unpack::FileTree,
        kind: PayloadKind,
        variant: Option<Variant>,
    }

    let mut src = roots.src.join(&item.payload.filename);

    // If we're moving files from the unpack directory, invalidate it immediately
    // so it is recreated in a future run if anything goes wrong
    if !config.copy {
        src.push(".unpack");
        if let Err(e) = std::fs::remove_file(&src) {
            tracing::warn!("Failed to remove {}: {}", src, e);
        }
        src.pop();
    }

    let variant = item.payload.variant;
    let kind = item.payload.kind;

    let get_tree = |src_path: &Path| -> Result<&crate::unpack::FileTree, Error> {
        let src_path = src_path
            .strip_prefix(&roots.src)
            .context("incorrect src root")?;
        let src_path = src_path
            .strip_prefix(&item.payload.filename)
            .context("incorrect src subdir")?;

        tree.subtree(src_path)
            .with_context(|| format!("missing expected subtree '{}'", src_path))
    };

    let mappings = match item.payload.kind {
        PayloadKind::CrtHeaders => {
            src.push("include");
            let tree = get_tree(&src)?;

            vec![Mapping {
                src,
                target: roots.crt.join("include"),
                tree,
                kind,
                variant,
            }]
        }
        PayloadKind::CrtLibs => {
            src.push("lib");
            let mut target = roots.crt.join("lib");

            let spectre = (variants & Variant::Spectre as u32) != 0;

            match item
                .payload
                .variant
                .context("CRT libs didn't specify a variant")?
            {
                Variant::Desktop => {
                    if spectre {
                        src.push("spectre");
                        target.push("spectre");
                    }
                }
                Variant::OneCore => {
                    if spectre {
                        src.push("spectre");
                        target.push("spectre");
                    }

                    src.push("onecore");
                    target.push("onecore");
                }
                Variant::Store => {}
                Variant::Spectre => unreachable!(),
            }

            {
                let arch = item
                    .payload
                    .target_arch
                    .context("CRT libs didn't specify an architecture")?;
                src.push(arch.as_ms_str());
                target.push(if config.preserve_ms_arch_notation {
                    arch.as_ms_str()
                } else {
                    arch.as_str()
                });
            }

            let tree = get_tree(&src)?;

            vec![Mapping {
                src,
                target,
                tree,
                kind,
                variant,
            }]
        }
        PayloadKind::SdkHeaders => {
            src.push("include");
            let tree = get_tree(&src)?;

            vec![Mapping {
                src,
                target: roots.sdk.join("include"),
                tree,
                kind,
                variant,
            }]
        }
        PayloadKind::SdkLibs => {
            src.push("lib/um");
            let mut target = roots.sdk.join("lib/um");

            {
                let arch = item
                    .payload
                    .target_arch
                    .context("SDK libs didn't specify an architecture")?;
                src.push(arch.as_ms_str());
                target.push(if config.preserve_ms_arch_notation {
                    arch.as_ms_str()
                } else {
                    arch.as_str()
                });
            }

            let tree = get_tree(&src)?;

            vec![Mapping {
                src,
                target,
                tree,
                kind,
                variant,
            }]
        }
        PayloadKind::SdkStoreLibs => {
            src.push("lib/um");
            let target = roots.sdk.join("lib/um");

            Arch::iter(arches)
                .map(|arch| -> Result<Mapping<'_>, Error> {
                    let src = src.join(arch.as_ms_str());
                    let tree = get_tree(&src)?;

                    Ok(Mapping {
                        src,
                        target: target.join(if config.preserve_ms_arch_notation {
                            arch.as_ms_str()
                        } else {
                            arch.as_str()
                        }),
                        tree,
                        kind,
                        variant,
                    })
                })
                .collect::<Result<Vec<_>, _>>()?
        }
        PayloadKind::Ucrt => {
            let inc_src = src.join("include/ucrt");
            let tree = get_tree(&inc_src)?;

            let mut mappings = vec![Mapping {
                src: inc_src,
                target: roots.sdk.join("include/ucrt"),
                tree,
                kind,
                variant,
            }];

            src.push("lib/ucrt");
            let target = roots.sdk.join("lib/ucrt");

            for arch in Arch::iter(arches) {
                let src = src.join(arch.as_ms_str());
                let tree = get_tree(&src)?;

                mappings.push(Mapping {
                    src,
                    target: target.join(if config.preserve_ms_arch_notation {
                        arch.as_ms_str()
                    } else {
                        arch.as_str()
                    }),
                    tree,
                    kind,
                    variant,
                });
            }

            mappings
        }
    };

    let include_debug_libs = config.include_debug_libs;
    let include_debug_symbols = config.include_debug_symbols;

    let mut results = Vec::new();

    item.progress.reset();
    item.progress
        .set_length(mappings.iter().map(|map| map.tree.stats().1).sum());
    item.progress.set_message("📦 splatting");

    let filter_store = variants & Variant::Store as u32 == 0;

    mappings
        .into_par_iter()
        .map(|mapping| -> Result<Option<SdkHeaders>, Error> {
            struct Dir<'ft> {
                src: PathBuf,
                tar: PathBuf,
                tree: &'ft crate::unpack::FileTree,
            }

            let mut sdk_headers = (mapping.kind == PayloadKind::SdkHeaders)
                .then(|| SdkHeaders::new(mapping.target.clone()));

            let mut dir_stack = vec![Dir {
                src: mapping.src,
                tar: mapping.target,
                tree: mapping.tree,
            }];

            while let Some(Dir { src, mut tar, tree }) = dir_stack.pop() {
                std::fs::create_dir_all(&tar)
                    .with_context(|| format!("unable to create {}", tar))?;

                for (fname, size) in &tree.files {
                    // Even if we don't splat 100% of the source files, we still
                    // want to show that we processed them all
                    item.progress.inc(*size);

                    let fname_str = fname.as_str();
                    if mapping.kind == PayloadKind::CrtLibs || mapping.kind == PayloadKind::Ucrt {
                        if !include_debug_symbols && fname.ends_with(".pdb") {
                            tracing::debug!("skipping {}", fname);
                            continue;
                        }

                        if !include_debug_libs {
                            if let Some(stripped) = fname_str.strip_suffix(".lib") {
                                if stripped.ends_with('d')
                                    || stripped.ends_with("d_netcore")
                                    || stripped
                                        .strip_suffix(|c: char| c.is_digit(10))
                                        .map_or(false, |fname| fname.ends_with('d'))
                                {
                                    tracing::debug!("skipping {}", fname);
                                    continue;
                                }
                            }
                        }
                    }

                    tar.push(fname);

                    let src_path = src.join(fname);

                    if config.copy {
                        std::fs::copy(&src_path, &tar)
                            .with_context(|| format!("failed to copy {} to {}", src_path, tar))?;
                    } else {
                        std::fs::rename(&src_path, &tar)
                            .with_context(|| format!("failed to move {} to {}", src_path, tar))?;
                    }

                    match mapping.kind {
                        // These are all internally consistent and lowercased, so if
                        // a library is including them with different casing that is
                        // kind of on them
                        //
                        // The SDK headers are also all over the place with casing
                        // as well as being internally inconsistent, so we scan
                        // them all for includes and add those that are referenced
                        // incorrectly, but we wait until after all of the headers
                        // have been unpacked before fixing them
                        PayloadKind::CrtHeaders | PayloadKind::Ucrt => {}
                        PayloadKind::SdkHeaders => {
                            if let Some(sdk_headers) = &mut sdk_headers {
                                let rel_target_path = sdk_headers.get_relative_path(&tar)?;
                                let rel_hash = calc_lower_hash(rel_target_path.as_str());

                                if sdk_headers.inner.insert(rel_hash, tar.clone()).is_some() {
                                    anyhow::bail!("found duplicate relative path when hashed");
                                }

                                // https://github.com/zeromq/libzmq/blob/3070a4b2461ec64129062907d915ed665d2ac126/src/precompiled.hpp#L73
                                if let Some(additional_name) = match fname_str {
                                    "mstcpip.h" => Some("Mstcpip.h"),
                                    _ => None,
                                } {
                                    tar.pop();
                                    tar.push(additional_name);

                                    symlink(fname_str, &tar)?;
                                }
                            }
                        }
                        PayloadKind::CrtLibs => {
                            // While _most_ of the libs *stares at Microsoft.VisualC.STLCLR.dll* are lower case,
                            // sometimes when they are specified as linker arguments, crates will link with
                            // SCREAMING as if they are angry at the linker, so fix this in the few "common" cases.
                            // This list is probably not complete, but that's what PRs are for
                            if let Some(angry_lib) = match fname_str.strip_suffix(".lib") {
                                Some("libcmt") => Some("LIBCMT.lib"),
                                Some("msvcrt") => Some("MSVCRT.lib"),
                                Some("oldnames") => Some("OLDNAMES.lib"),
                                _ => None,
                            } {
                                tar.pop();
                                tar.push(angry_lib);

                                symlink(fname_str, &tar)?;
                            }
                        }
                        PayloadKind::SdkLibs | PayloadKind::SdkStoreLibs => {
                            // The SDK libraries are just completely inconsistent, but
                            // all usage I have ever seen just links them with lowercase
                            // names, so we just fix all of them to be lowercase.
                            // Note that we need to not only fix the name but also the
                            // extension, as for some inexplicable reason about half of
                            // them use an uppercase L for the extension. WTF. This also
                            // applies to the tlb files, so at least they are consistently
                            // inconsistent
                            if fname_str.contains(|c: char| c.is_ascii_uppercase()) {
                                tar.pop();
                                tar.push(fname_str.to_ascii_lowercase());

                                symlink(fname_str, &tar)?;
                            }

                            // There is also this: https://github.com/time-rs/time/blob/v0.3.2/src/utc_offset.rs#L454
                            if let Some(additional_name) = match fname_str {
                                "kernel32.Lib" => Some("Kernel32.lib"),
                                _ => None,
                            } {
                                tar.pop();
                                tar.push(additional_name);

                                symlink(fname_str, &tar)?;
                            }

                            // We also need to support SCREAMING case for the library names
                            // due to...reasons https://github.com/microsoft/windows-rs/blob/a27a74784ccf304ab362bf2416f5f44e98e5eecd/src/bindings.rs#L3772
                            if tar.extension() == Some("lib") {
                                tar.pop();
                                tar.push(fname_str.to_ascii_uppercase());
                                tar.set_extension("lib");

                                symlink(fname_str, &tar)?;
                            }
                        }
                    }

                    tar.pop();
                }

                // Due to some libs from the CRT Store libs variant being needed
                // by the regular Desktop variant, if we are not actually
                // targeting the Store we can avoid adding the additional
                // uwp and store subdirectories
                if mapping.variant == Some(Variant::Store) && filter_store {
                    if mapping.kind == PayloadKind::CrtLibs {
                        tracing::debug!("skipping CRT subdirs");

                        item.progress
                            .inc(tree.dirs.iter().map(|(_, ft)| ft.stats().1).sum());
                        continue;
                    } else if mapping.kind == PayloadKind::SdkHeaders {
                    }
                }

                for (dir, dtree) in &tree.dirs {
                    dir_stack.push(Dir {
                        src: src.join(dir),
                        tar: tar.join(dir),
                        tree: dtree,
                    });
                }
            }

            Ok(sdk_headers)
        })
        .collect_into_vec(&mut results);

    item.progress.finish_with_message("📦 splatted");

    let headers = results.into_iter().collect::<Result<Vec<_>, _>>()?;
    Ok(headers.into_iter().find_map(|headers| headers))
}

#[inline]
fn symlink(original: &str, link: &Path) -> Result<(), Error> {
    std::os::unix::fs::symlink(original, link)
        .with_context(|| format!("unable to symlink from {} to {}", link, original))
}

pub(crate) fn finalize_splat(
    ctx: &Ctx,
    roots: &SplatRoots,
    sdk_headers: Vec<SdkHeaders>,
) -> Result<(), Error> {
    let mut files: std::collections::HashMap<
        _,
        _,
        std::hash::BuildHasherDefault<twox_hash::XxHash64>,
    > = Default::default();

    struct Header<'root> {
        root: &'root SdkHeaders,
        path: PathBuf,
    }

    for hdrs in &sdk_headers {
        for (k, v) in &hdrs.inner {
            let existing = files.insert(
                k,
                Header {
                    root: hdrs,
                    path: v.clone(),
                },
            );

            if let Some(existing) = existing {
                panic!("already have {} matching {}", existing.path, v);
            }
        }
    }

    let mut includes: std::collections::HashSet<
        _,
        std::hash::BuildHasherDefault<twox_hash::XxHash64>,
    > = Default::default();

    // Many headers won't necessarily be referenced internally by an all
    // lower case filename, even when that is common from outside the sdk
    // for basically all files (eg windows.h, psapi.h etc)
    includes.extend(files.values().filter_map(|fpath| {
        fpath
            .root
            .get_relative_path(&fpath.path)
            .ok()
            .and_then(|rel_path| {
                let rp = rel_path.as_str();

                // Ignore the 2 opengl includes, since they are the one exception
                // that all subdirectories are lowercased
                if rel_path.starts_with("gl/") {
                    return None;
                }

                rp.contains(|c: char| c.is_ascii_uppercase())
                    .then(|| PathBuf::from(rp.to_ascii_lowercase()))
            })
    }));

    let regex = regex::bytes::Regex::new(r#"#include\s+(?:"|<)([^">]+)(?:"|>)?"#).unwrap();

    let pb = indicatif::ProgressBar::with_draw_target(files.len() as u64, ctx.draw_target.into())
        .with_style(
            indicatif::ProgressStyle::default_bar()
                .template(
                    "{spinner:.green} {prefix:.bold} [{elapsed}] {wide_bar:.green} {pos}/{len}",
                )
                .progress_chars("█▇▆▅▄▃▂▁ "),
        );
    pb.set_prefix("symlinks");
    pb.set_message("🔍 includes");

    // Scan all of the files in the include directory for includes so that
    // we can add symlinks to at least make the SDK headers internally consistent
    for file in files.values() {
        // Of course, there are files with non-utf8 encoding :p
        let contents =
            std::fs::read(&file.path).with_context(|| format!("unable to read {}", file.path))?;

        for caps in regex.captures_iter(&contents) {
            let rel_path = std::str::from_utf8(&caps[1]).with_context(|| {
                format!(
                    "{} contained an include with non-utf8 characters",
                    file.path
                )
            })?;

            // TODO: Some includes, particularly in [wrl](https://docs.microsoft.com/en-us/cpp/cppcx/wrl/windows-runtime-cpp-template-library-wrl?view=msvc-170)
            // use incorrect `\` path separators, this is hopefully not an issue
            // since no one cares about that target? But if it is a problem
            // we'll need to actually modify the include to fix the path. :-/
            if !includes.contains(Path::new(rel_path)) {
                includes.insert(PathBuf::from(rel_path));
            }
        }

        pb.inc(1);
    }

    pb.finish();

    for include in includes {
        let lower_hash = calc_lower_hash(include.as_str());

        match files.get(&lower_hash) {
            Some(disk_file) => match (disk_file.path.file_name(), include.file_name()) {
                (Some(disk_name), Some(include_name)) if disk_name != include_name => {
                    let mut link = disk_file.path.clone();
                    link.pop();
                    link.push(include_name);

                    symlink(disk_name, &link)?;
                }
                _ => {}
            },
            None => {
                tracing::debug!(
                    "SDK include for '{}' was not found in the SDK headers",
                    include
                );
            }
        }
    }

    // There is a um/gl directory, but of course there is an include for GL/
    // instead, so fix that as well :p
    symlink("gl", &roots.sdk.join("include/um/GL"))?;

    Ok(())
}

use std::hash::Hasher;

#[inline]
fn calc_lower_hash(path: &str) -> u64 {
    let mut hasher = twox_hash::XxHash64::with_seed(0);

    for c in path.chars().map(|c| c.to_ascii_lowercase() as u8) {
        hasher.write_u8(c);
    }

    hasher.finish()
}
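// A minimal sketch (not part of the original module) of the property the
// include-fixing pass relies on: calc_lower_hash is case-insensitive, so two
// spellings of the same include collide onto the same on-disk header entry.
#[cfg(test)]
mod lower_hash_sketch {
    use super::calc_lower_hash;

    #[test]
    fn casing_does_not_change_the_hash() {
        assert_eq!(
            calc_lower_hash("um/Windows.h"),
            calc_lower_hash("um/windows.h")
        );
        assert_ne!(
            calc_lower_hash("um/windows.h"),
            calc_lower_hash("um/winuser.h")
        );
    }
}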
35.858713
160
0.459855
6162840c5bbbca5e1c5e1aba9dcbceb871a10a39
2,795
/***********************************************************************
* panel-prototype/src/widgets/register_display.rs
*     Module "widgets::register_display".
*     Panel lamps.
* Copyright (C) 2020, Paul Kimpel.
* Licensed under the MIT License, see
*     http://www.opensource.org/licenses/mit-license.php
************************************************************************
* Modification log.
* 2020-02-16  P.Kimpel
*     Original version, cloned from widgets/register_lamp.rs.
***********************************************************************/

use imgui::{im_str, ImStr, StyleColor, StyleVar, Ui};

use super::*;

pub struct RegisterDisplay<'a> {
    pub position: Position,
    pub frame_size: FrameSize,
    pub lamp_spacing: f32,
    pub colors: &'a [Color4],
    pub active_color: Color4,
    pub border_color: Color4,
    pub border_size: f32,
    pub border_rounding: f32,
    pub label_color: Color4,
    pub label_text: &'a ImStr
}

impl<'a> Default for RegisterDisplay<'a> {
    fn default() -> Self {
        let label_text = im_str!("");
        RegisterDisplay {
            position: [0.0, 0.0],
            frame_size: [12.0, 12.0],
            lamp_spacing: 2.0,
            colors: &super::NEON_LEVEL,
            active_color: GRAY_COLOR,
            border_color: BLACK_COLOR,
            border_size: 0.0,
            border_rounding: 6.0,
            label_color: BLACK_COLOR,
            label_text
        }
    }
}

impl<'a> RegisterDisplay<'a> {

    pub fn build(&self, ui: &Ui, glow: &[f32]) -> Vec<bool> {
        let t0 = ui.push_style_vars(&[
            StyleVar::FrameRounding(self.border_rounding),
            StyleVar::FrameBorderSize(self.border_size)
        ]);
        let t1 = ui.push_style_colors(&[
            (StyleColor::Text, self.label_color),
            (StyleColor::Border, self.border_color),
            (StyleColor::ButtonActive, self.active_color)
        ]);

        let mut clicks = Vec::<bool>::with_capacity(glow.len());
        let increment = self.frame_size[0] + self.lamp_spacing;
        // saturating_sub keeps this from underflowing (and panicking in debug
        // builds) when `glow` is empty; the loop below simply never runs then.
        let mut x = self.position[0] + increment*(glow.len().saturating_sub(1)) as f32;
        let y = self.position[1];

        for g in glow.iter() {
            let level = (*g*(self.colors.len()-1) as f32).round() as usize;
            let color = self.colors[level];
            let t2 = ui.push_style_colors(&[
                (StyleColor::Button, color),
                (StyleColor::ButtonHovered, color)
            ]);

            ui.set_cursor_pos([x, y]);
            let clicked = ui.button(self.label_text, self.frame_size);
            clicks.push(clicked);

            t2.pop(&ui);
            x -= increment;
        }

        t1.pop(&ui);
        t0.pop(&ui);
        clicks
    }
}
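// A minimal sketch (hypothetical palette length) of the glow-to-level mapping
// used in build(): a glow intensity in [0.0, 1.0] is scaled across the color
// table and rounded to the nearest palette index.
#[cfg(test)]
mod glow_level_sketch {
    #[test]
    fn glow_maps_to_palette_index() {
        let palette_len = 7usize; // e.g. a 7-entry NEON_LEVEL-style table
        let level = |g: f32| (g * (palette_len - 1) as f32).round() as usize;
        assert_eq!(level(0.0), 0);
        assert_eq!(level(0.5), 3);
        assert_eq!(level(1.0), palette_len - 1);
    }
}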
31.761364
75
0.519141
87a72efbe08e51fe8f77d5fdd857739112f28c63
1,160
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![feature(untagged_unions)]
#![allow(dead_code)]
#![deny(unions_with_drop_fields)]

union U {
    a: u8, // OK
}

union W {
    a: String, //~ ERROR union contains a field with possibly non-trivial drop code
    b: String, // OK, only one field is reported
}

struct S(String);

// `S` doesn't implement the `Drop` trait, but still has a non-trivial destructor
union Y {
    a: S, //~ ERROR union contains a field with possibly non-trivial drop code
}

// We don't know if `T` is trivially-destructible or not until trans
union J<T> {
    a: T, //~ ERROR union contains a field with possibly non-trivial drop code
}

union H<T: Copy> {
    a: T, // OK, `T` is `Copy`, no destructor
}

fn main() {}
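// A minimal sketch (not part of the original test) of the usual way to
// satisfy this lint: wrap droppable fields in ManuallyDrop so the union
// itself has no drop code and fields must be freed explicitly.
//
//     union Accepted {
//         a: std::mem::ManuallyDrop<String>,
//     }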
28.292683
83
0.693966
18976c9aee23eb2f979d3e48c068fe3196710296
32,275
use super::{infer, type_at, type_at_pos}; use crate::test_db::TestDB; use insta::assert_snapshot; use ra_db::fixture::WithFixture; #[test] fn infer_box() { let (db, pos) = TestDB::with_position( r#" //- /main.rs crate:main deps:std fn test() { let x = box 1; let t = (x, box x, box &1, box [1]); t<|>; } //- /std.rs crate:std #[prelude_import] use prelude::*; mod prelude {} mod boxed { pub struct Box<T: ?Sized> { inner: *mut T, } } "#, ); assert_eq!("(Box<i32>, Box<Box<i32>>, Box<&i32>, Box<[i32;_]>)", type_at_pos(&db, pos)); } #[test] fn infer_adt_self() { let (db, pos) = TestDB::with_position( r#" //- /main.rs enum Nat { Succ(Self), Demo(Nat), Zero } fn test() { let foo: Nat = Nat::Zero; if let Nat::Succ(x) = foo { x<|> } } "#, ); assert_eq!("Nat", type_at_pos(&db, pos)); } #[test] fn infer_ranges() { let (db, pos) = TestDB::with_position( r#" //- /main.rs crate:main deps:std fn test() { let a = ..; let b = 1..; let c = ..2u32; let d = 1..2usize; let e = ..=10; let f = 'a'..='z'; let t = (a, b, c, d, e, f); t<|>; } //- /std.rs crate:std #[prelude_import] use prelude::*; mod prelude {} pub mod ops { pub struct Range<Idx> { pub start: Idx, pub end: Idx, } pub struct RangeFrom<Idx> { pub start: Idx, } struct RangeFull; pub struct RangeInclusive<Idx> { start: Idx, end: Idx, is_empty: u8, } pub struct RangeTo<Idx> { pub end: Idx, } pub struct RangeToInclusive<Idx> { pub end: Idx, } } "#, ); assert_eq!( "(RangeFull, RangeFrom<i32>, RangeTo<u32>, Range<usize>, RangeToInclusive<i32>, RangeInclusive<char>)", type_at_pos(&db, pos), ); } #[test] fn infer_while_let() { let (db, pos) = TestDB::with_position( r#" //- /main.rs enum Option<T> { Some(T), None } fn test() { let foo: Option<f32> = None; while let Option::Some(x) = foo { <|>x } } "#, ); assert_eq!("f32", type_at_pos(&db, pos)); } #[test] fn infer_basics() { assert_snapshot!( infer(r#" fn test(a: u32, b: isize, c: !, d: &str) { a; b; c; d; 1usize; 1isize; "test"; 1.0f32; }"#), @r###" [9; 10) 'a': u32 [17; 18) 'b': isize [27; 28) 'c': ! [33; 34) 'd': &str [42; 121) '{ ...f32; }': ! [48; 49) 'a': u32 [55; 56) 'b': isize [62; 63) 'c': ! 
[69; 70) 'd': &str [76; 82) '1usize': usize [88; 94) '1isize': isize [100; 106) '"test"': &str [112; 118) '1.0f32': f32 "### ); } #[test] fn infer_let() { assert_snapshot!( infer(r#" fn test() { let a = 1isize; let b: usize = 1; let c = b; let d: u32; let e; let f: i32 = e; } "#), @r###" [11; 118) '{ ...= e; }': () [21; 22) 'a': isize [25; 31) '1isize': isize [41; 42) 'b': usize [52; 53) '1': usize [63; 64) 'c': usize [67; 68) 'b': usize [78; 79) 'd': u32 [94; 95) 'e': i32 [105; 106) 'f': i32 [114; 115) 'e': i32 "### ); } #[test] fn infer_paths() { assert_snapshot!( infer(r#" fn a() -> u32 { 1 } mod b { fn c() -> u32 { 1 } } fn test() { a(); b::c(); } "#), @r###" [15; 20) '{ 1 }': u32 [17; 18) '1': u32 [48; 53) '{ 1 }': u32 [50; 51) '1': u32 [67; 91) '{ ...c(); }': () [73; 74) 'a': fn a() -> u32 [73; 76) 'a()': u32 [82; 86) 'b::c': fn c() -> u32 [82; 88) 'b::c()': u32 "### ); } #[test] fn infer_path_type() { assert_snapshot!( infer(r#" struct S; impl S { fn foo() -> i32 { 1 } } fn test() { S::foo(); <S>::foo(); } "#), @r###" [41; 46) '{ 1 }': i32 [43; 44) '1': i32 [60; 93) '{ ...o(); }': () [66; 72) 'S::foo': fn foo() -> i32 [66; 74) 'S::foo()': i32 [80; 88) '<S>::foo': fn foo() -> i32 [80; 90) '<S>::foo()': i32 "### ); } #[test] fn infer_struct() { assert_snapshot!( infer(r#" struct A { b: B, c: C, } struct B; struct C(usize); fn test() { let c = C(1); B; let a: A = A { b: B, c: C(1) }; a.b; a.c; } "#), @r###" [72; 154) '{ ...a.c; }': () [82; 83) 'c': C [86; 87) 'C': C(usize) -> C [86; 90) 'C(1)': C [88; 89) '1': usize [96; 97) 'B': B [107; 108) 'a': A [114; 133) 'A { b:...C(1) }': A [121; 122) 'B': B [127; 128) 'C': C(usize) -> C [127; 131) 'C(1)': C [129; 130) '1': usize [139; 140) 'a': A [139; 142) 'a.b': B [148; 149) 'a': A [148; 151) 'a.c': C "### ); } #[test] fn infer_enum() { assert_snapshot!( infer(r#" enum E { V1 { field: u32 }, V2 } fn test() { E::V1 { field: 1 }; E::V2; }"#), @r###" [48; 82) '{ E:...:V2; }': () [52; 70) 'E::V1 ...d: 1 }': E [67; 68) '1': u32 [74; 79) 'E::V2': E "### ); } #[test] fn infer_refs() { assert_snapshot!( infer(r#" fn test(a: &u32, b: &mut u32, c: *const u32, d: *mut u32) { a; *a; &a; &mut a; b; *b; &b; c; *c; d; *d; } "#), @r###" [9; 10) 'a': &u32 [18; 19) 'b': &mut u32 [31; 32) 'c': *const u32 [46; 47) 'd': *mut u32 [59; 150) '{ ... *d; }': () [65; 66) 'a': &u32 [72; 74) '*a': u32 [73; 74) 'a': &u32 [80; 82) '&a': &&u32 [81; 82) 'a': &u32 [88; 94) '&mut a': &mut &u32 [93; 94) 'a': &u32 [100; 101) 'b': &mut u32 [107; 109) '*b': u32 [108; 109) 'b': &mut u32 [115; 117) '&b': &&mut u32 [116; 117) 'b': &mut u32 [123; 124) 'c': *const u32 [130; 132) '*c': u32 [131; 132) 'c': *const u32 [138; 139) 'd': *mut u32 [145; 147) '*d': u32 [146; 147) 'd': *mut u32 "### ); } #[test] fn infer_literals() { assert_snapshot!( infer(r##" fn test() { 5i32; 5f32; 5f64; "hello"; b"bytes"; 'c'; b'b'; 3.14; 5000; false; true; r#" //! doc // non-doc mod foo {} "#; br#"yolo"#; } "##), @r###" [11; 221) '{ ...o"#; }': () [17; 21) '5i32': i32 [27; 31) '5f32': f32 [37; 41) '5f64': f64 [47; 54) '"hello"': &str [60; 68) 'b"bytes"': &[u8] [74; 77) ''c'': char [83; 87) 'b'b'': u8 [93; 97) '3.14': f64 [103; 107) '5000': i32 [113; 118) 'false': bool [124; 128) 'true': bool [134; 202) 'r#" ... 
"#': &str [208; 218) 'br#"yolo"#': &[u8] "### ); } #[test] fn infer_unary_op() { assert_snapshot!( infer(r#" enum SomeType {} fn test(x: SomeType) { let b = false; let c = !b; let a = 100; let d: i128 = -a; let e = -100; let f = !!!true; let g = !42; let h = !10u32; let j = !a; -3.14; !3; -x; !x; -"hello"; !"hello"; } "#), @r###" [27; 28) 'x': SomeType [40; 272) '{ ...lo"; }': () [50; 51) 'b': bool [54; 59) 'false': bool [69; 70) 'c': bool [73; 75) '!b': bool [74; 75) 'b': bool [85; 86) 'a': i128 [89; 92) '100': i128 [102; 103) 'd': i128 [112; 114) '-a': i128 [113; 114) 'a': i128 [124; 125) 'e': i32 [128; 132) '-100': i32 [129; 132) '100': i32 [142; 143) 'f': bool [146; 153) '!!!true': bool [147; 153) '!!true': bool [148; 153) '!true': bool [149; 153) 'true': bool [163; 164) 'g': i32 [167; 170) '!42': i32 [168; 170) '42': i32 [180; 181) 'h': u32 [184; 190) '!10u32': u32 [185; 190) '10u32': u32 [200; 201) 'j': i128 [204; 206) '!a': i128 [205; 206) 'a': i128 [212; 217) '-3.14': f64 [213; 217) '3.14': f64 [223; 225) '!3': i32 [224; 225) '3': i32 [231; 233) '-x': {unknown} [232; 233) 'x': SomeType [239; 241) '!x': {unknown} [240; 241) 'x': SomeType [247; 255) '-"hello"': {unknown} [248; 255) '"hello"': &str [261; 269) '!"hello"': {unknown} [262; 269) '"hello"': &str "### ); } #[test] fn infer_backwards() { assert_snapshot!( infer(r#" fn takes_u32(x: u32) {} struct S { i32_field: i32 } fn test() -> &mut &f64 { let a = unknown_function(); takes_u32(a); let b = unknown_function(); S { i32_field: b }; let c = unknown_function(); &mut &c } "#), @r###" [14; 15) 'x': u32 [22; 24) '{}': () [78; 231) '{ ...t &c }': &mut &f64 [88; 89) 'a': u32 [92; 108) 'unknow...nction': {unknown} [92; 110) 'unknow...tion()': u32 [116; 125) 'takes_u32': fn takes_u32(u32) -> () [116; 128) 'takes_u32(a)': () [126; 127) 'a': u32 [138; 139) 'b': i32 [142; 158) 'unknow...nction': {unknown} [142; 160) 'unknow...tion()': i32 [166; 184) 'S { i3...d: b }': S [181; 182) 'b': i32 [194; 195) 'c': f64 [198; 214) 'unknow...nction': {unknown} [198; 216) 'unknow...tion()': f64 [222; 229) '&mut &c': &mut &f64 [227; 229) '&c': &f64 [228; 229) 'c': f64 "### ); } #[test] fn infer_self() { assert_snapshot!( infer(r#" struct S; impl S { fn test(&self) { self; } fn test2(self: &Self) { self; } fn test3() -> Self { S {} } fn test4() -> Self { Self {} } } "#), @r###" [34; 38) 'self': &S [40; 61) '{ ... }': () [50; 54) 'self': &S [75; 79) 'self': &S [88; 109) '{ ... }': () [98; 102) 'self': &S [133; 153) '{ ... }': S [143; 147) 'S {}': S [177; 200) '{ ... }': S [187; 194) 'Self {}': S "### ); } #[test] fn infer_binary_op() { assert_snapshot!( infer(r#" fn f(x: bool) -> i32 { 0i32 } fn test() -> bool { let x = a && b; let y = true || false; let z = x == y; let t = x != y; let minus_forty: isize = -40isize; let h = minus_forty <= CONST_2; let c = f(z || y) + 5; let d = b; let g = minus_forty ^= i; let ten: usize = 10; let ten_is_eleven = ten == some_num; ten < 3 } "#), @r###" [6; 7) 'x': bool [22; 34) '{ 0i32 }': i32 [28; 32) '0i32': i32 [54; 370) '{ ... 
< 3 }': bool [64; 65) 'x': bool [68; 69) 'a': bool [68; 74) 'a && b': bool [73; 74) 'b': bool [84; 85) 'y': bool [88; 92) 'true': bool [88; 101) 'true || false': bool [96; 101) 'false': bool [111; 112) 'z': bool [115; 116) 'x': bool [115; 121) 'x == y': bool [120; 121) 'y': bool [131; 132) 't': bool [135; 136) 'x': bool [135; 141) 'x != y': bool [140; 141) 'y': bool [151; 162) 'minus_forty': isize [172; 180) '-40isize': isize [173; 180) '40isize': isize [190; 191) 'h': bool [194; 205) 'minus_forty': isize [194; 216) 'minus_...ONST_2': bool [209; 216) 'CONST_2': isize [226; 227) 'c': i32 [230; 231) 'f': fn f(bool) -> i32 [230; 239) 'f(z || y)': i32 [230; 243) 'f(z || y) + 5': i32 [232; 233) 'z': bool [232; 238) 'z || y': bool [237; 238) 'y': bool [242; 243) '5': i32 [253; 254) 'd': {unknown} [257; 258) 'b': {unknown} [268; 269) 'g': () [272; 283) 'minus_forty': isize [272; 288) 'minus_...y ^= i': () [287; 288) 'i': isize [298; 301) 'ten': usize [311; 313) '10': usize [323; 336) 'ten_is_eleven': bool [339; 342) 'ten': usize [339; 354) 'ten == some_num': bool [346; 354) 'some_num': usize [361; 364) 'ten': usize [361; 368) 'ten < 3': bool [367; 368) '3': usize "### ); } #[test] fn infer_field_autoderef() { assert_snapshot!( infer(r#" struct A { b: B, } struct B; fn test1(a: A) { let a1 = a; a1.b; let a2 = &a; a2.b; let a3 = &mut a; a3.b; let a4 = &&&&&&&a; a4.b; let a5 = &mut &&mut &&mut a; a5.b; } fn test2(a1: *const A, a2: *mut A) { a1.b; a2.b; } "#), @r###" [44; 45) 'a': A [50; 213) '{ ...5.b; }': () [60; 62) 'a1': A [65; 66) 'a': A [72; 74) 'a1': A [72; 76) 'a1.b': B [86; 88) 'a2': &A [91; 93) '&a': &A [92; 93) 'a': A [99; 101) 'a2': &A [99; 103) 'a2.b': B [113; 115) 'a3': &mut A [118; 124) '&mut a': &mut A [123; 124) 'a': A [130; 132) 'a3': &mut A [130; 134) 'a3.b': B [144; 146) 'a4': &&&&&&&A [149; 157) '&&&&&&&a': &&&&&&&A [150; 157) '&&&&&&a': &&&&&&A [151; 157) '&&&&&a': &&&&&A [152; 157) '&&&&a': &&&&A [153; 157) '&&&a': &&&A [154; 157) '&&a': &&A [155; 157) '&a': &A [156; 157) 'a': A [163; 165) 'a4': &&&&&&&A [163; 167) 'a4.b': B [177; 179) 'a5': &mut &&mut &&mut A [182; 200) '&mut &...&mut a': &mut &&mut &&mut A [187; 200) '&&mut &&mut a': &&mut &&mut A [188; 200) '&mut &&mut a': &mut &&mut A [193; 200) '&&mut a': &&mut A [194; 200) '&mut a': &mut A [199; 200) 'a': A [206; 208) 'a5': &mut &&mut &&mut A [206; 210) 'a5.b': B [224; 226) 'a1': *const A [238; 240) 'a2': *mut A [250; 273) '{ ...2.b; }': () [256; 258) 'a1': *const A [256; 260) 'a1.b': B [266; 268) 'a2': *mut A [266; 270) 'a2.b': B "### ); } #[test] fn infer_argument_autoderef() { assert_snapshot!( infer(r#" #[lang = "deref"] pub trait Deref { type Target; fn deref(&self) -> &Self::Target; } struct A<T>(T); impl<T> A<T> { fn foo(&self) -> &T { &self.0 } } struct B<T>(T); impl<T> Deref for B<T> { type Target = T; fn deref(&self) -> &Self::Target { &self.0 } } fn test() { let t = A::foo(&&B(B(A(42)))); } "#), @r###" [68; 72) 'self': &Self [139; 143) 'self': &A<T> [151; 174) '{ ... }': &T [161; 168) '&self.0': &T [162; 166) 'self': &A<T> [162; 168) 'self.0': T [255; 259) 'self': &B<T> [278; 301) '{ ... 
}': &T [288; 295) '&self.0': &T [289; 293) 'self': &B<T> [289; 295) 'self.0': T [315; 353) '{ ...))); }': () [325; 326) 't': &i32 [329; 335) 'A::foo': fn foo<i32>(&A<T>) -> &T [329; 350) 'A::foo...42))))': &i32 [336; 349) '&&B(B(A(42)))': &&B<B<A<i32>>> [337; 349) '&B(B(A(42)))': &B<B<A<i32>>> [338; 339) 'B': B<B<A<i32>>>(T) -> B<T> [338; 349) 'B(B(A(42)))': B<B<A<i32>>> [340; 341) 'B': B<A<i32>>(T) -> B<T> [340; 348) 'B(A(42))': B<A<i32>> [342; 343) 'A': A<i32>(T) -> A<T> [342; 347) 'A(42)': A<i32> [344; 346) '42': i32 "### ); } #[test] fn infer_method_argument_autoderef() { assert_snapshot!( infer(r#" #[lang = "deref"] pub trait Deref { type Target; fn deref(&self) -> &Self::Target; } struct A<T>(*mut T); impl<T> A<T> { fn foo(&self, x: &A<T>) -> &T { &*x.0 } } struct B<T>(T); impl<T> Deref for B<T> { type Target = T; fn deref(&self) -> &Self::Target { &self.0 } } fn test(a: A<i32>) { let t = A(0 as *mut _).foo(&&B(B(a))); } "#), @r###" [68; 72) 'self': &Self [144; 148) 'self': &A<T> [150; 151) 'x': &A<T> [166; 187) '{ ... }': &T [176; 181) '&*x.0': &T [177; 181) '*x.0': T [178; 179) 'x': &A<T> [178; 181) 'x.0': *mut T [268; 272) 'self': &B<T> [291; 314) '{ ... }': &T [301; 308) '&self.0': &T [302; 306) 'self': &B<T> [302; 308) 'self.0': T [326; 327) 'a': A<i32> [337; 383) '{ ...))); }': () [347; 348) 't': &i32 [351; 352) 'A': A<i32>(*mut T) -> A<T> [351; 365) 'A(0 as *mut _)': A<i32> [351; 380) 'A(0 as...B(a)))': &i32 [353; 354) '0': i32 [353; 364) '0 as *mut _': *mut i32 [370; 379) '&&B(B(a))': &&B<B<A<i32>>> [371; 379) '&B(B(a))': &B<B<A<i32>>> [372; 373) 'B': B<B<A<i32>>>(T) -> B<T> [372; 379) 'B(B(a))': B<B<A<i32>>> [374; 375) 'B': B<A<i32>>(T) -> B<T> [374; 378) 'B(a)': B<A<i32>> [376; 377) 'a': A<i32> "### ); } #[test] fn infer_in_elseif() { assert_snapshot!( infer(r#" struct Foo { field: i32 } fn main(foo: Foo) { if true { } else if false { foo.field } } "#), @r###" [35; 38) 'foo': Foo [45; 109) '{ ... } }': () [51; 107) 'if tru... }': () [54; 58) 'true': bool [59; 67) '{ }': () [73; 107) 'if fal... }': () [76; 81) 'false': bool [82; 107) '{ ... }': i32 [92; 95) 'foo': Foo [92; 101) 'foo.field': i32 "### ) } #[test] fn infer_if_match_with_return() { assert_snapshot!( infer(r#" fn foo() { let _x1 = if true { 1 } else { return; }; let _x2 = if true { 2 } else { return }; let _x3 = match true { true => 3, _ => { return; } }; let _x4 = match true { true => 4, _ => return }; }"#), @r###" [10; 323) '{ ... }; }': () [20; 23) '_x1': i32 [26; 80) 'if tru... }': i32 [29; 33) 'true': bool [34; 51) '{ ... }': i32 [44; 45) '1': i32 [57; 80) '{ ... }': ! [67; 73) 'return': ! [90; 93) '_x2': i32 [96; 149) 'if tru... }': i32 [99; 103) 'true': bool [104; 121) '{ ... }': i32 [114; 115) '2': i32 [127; 149) '{ ... }': ! [137; 143) 'return': ! [159; 162) '_x3': i32 [165; 247) 'match ... }': i32 [171; 175) 'true': bool [186; 190) 'true': bool [194; 195) '3': i32 [205; 206) '_': bool [210; 241) '{ ... }': ! [224; 230) 'return': ! [257; 260) '_x4': i32 [263; 320) 'match ... }': i32 [269; 273) 'true': bool [284; 288) 'true': bool [292; 293) '4': i32 [303; 304) '_': bool [308; 314) 'return': ! 
"### ) } #[test] fn infer_inherent_method() { assert_snapshot!( infer(r#" struct A; impl A { fn foo(self, x: u32) -> i32 {} } mod b { impl super::A { fn bar(&self, x: u64) -> i64 {} } } fn test(a: A) { a.foo(1); (&a).bar(1); a.bar(1); } "#), @r###" [32; 36) 'self': A [38; 39) 'x': u32 [53; 55) '{}': () [103; 107) 'self': &A [109; 110) 'x': u64 [124; 126) '{}': () [144; 145) 'a': A [150; 198) '{ ...(1); }': () [156; 157) 'a': A [156; 164) 'a.foo(1)': i32 [162; 163) '1': u32 [170; 181) '(&a).bar(1)': i64 [171; 173) '&a': &A [172; 173) 'a': A [179; 180) '1': u64 [187; 188) 'a': A [187; 195) 'a.bar(1)': i64 [193; 194) '1': u64 "### ); } #[test] fn infer_inherent_method_str() { assert_snapshot!( infer(r#" #[lang = "str"] impl str { fn foo(&self) -> i32 {} } fn test() { "foo".foo(); } "#), @r###" [40; 44) 'self': &str [53; 55) '{}': () [69; 89) '{ ...o(); }': () [75; 80) '"foo"': &str [75; 86) '"foo".foo()': i32 "### ); } #[test] fn infer_tuple() { assert_snapshot!( infer(r#" fn test(x: &str, y: isize) { let a: (u32, &str) = (1, "a"); let b = (a, x); let c = (y, x); let d = (c, x); let e = (1, "e"); let f = (e, "d"); } "#), @r###" [9; 10) 'x': &str [18; 19) 'y': isize [28; 170) '{ ...d"); }': () [38; 39) 'a': (u32, &str) [55; 63) '(1, "a")': (u32, &str) [56; 57) '1': u32 [59; 62) '"a"': &str [73; 74) 'b': ((u32, &str), &str) [77; 83) '(a, x)': ((u32, &str), &str) [78; 79) 'a': (u32, &str) [81; 82) 'x': &str [93; 94) 'c': (isize, &str) [97; 103) '(y, x)': (isize, &str) [98; 99) 'y': isize [101; 102) 'x': &str [113; 114) 'd': ((isize, &str), &str) [117; 123) '(c, x)': ((isize, &str), &str) [118; 119) 'c': (isize, &str) [121; 122) 'x': &str [133; 134) 'e': (i32, &str) [137; 145) '(1, "e")': (i32, &str) [138; 139) '1': i32 [141; 144) '"e"': &str [155; 156) 'f': ((i32, &str), &str) [159; 167) '(e, "d")': ((i32, &str), &str) [160; 161) 'e': (i32, &str) [163; 166) '"d"': &str "### ); } #[test] fn infer_array() { assert_snapshot!( infer(r#" fn test(x: &str, y: isize) { let a = [x]; let b = [a, a]; let c = [b, b]; let d = [y, 1, 2, 3]; let d = [1, y, 2, 3]; let e = [y]; let f = [d, d]; let g = [e, e]; let h = [1, 2]; let i = ["a", "b"]; let b = [a, ["b"]]; let x: [u8; 0] = []; } "#), @r###" [9; 10) 'x': &str [18; 19) 'y': isize [28; 293) '{ ... 
[]; }': () [38; 39) 'a': [&str;_] [42; 45) '[x]': [&str;_] [43; 44) 'x': &str [55; 56) 'b': [[&str;_];_] [59; 65) '[a, a]': [[&str;_];_] [60; 61) 'a': [&str;_] [63; 64) 'a': [&str;_] [75; 76) 'c': [[[&str;_];_];_] [79; 85) '[b, b]': [[[&str;_];_];_] [80; 81) 'b': [[&str;_];_] [83; 84) 'b': [[&str;_];_] [96; 97) 'd': [isize;_] [100; 112) '[y, 1, 2, 3]': [isize;_] [101; 102) 'y': isize [104; 105) '1': isize [107; 108) '2': isize [110; 111) '3': isize [122; 123) 'd': [isize;_] [126; 138) '[1, y, 2, 3]': [isize;_] [127; 128) '1': isize [130; 131) 'y': isize [133; 134) '2': isize [136; 137) '3': isize [148; 149) 'e': [isize;_] [152; 155) '[y]': [isize;_] [153; 154) 'y': isize [165; 166) 'f': [[isize;_];_] [169; 175) '[d, d]': [[isize;_];_] [170; 171) 'd': [isize;_] [173; 174) 'd': [isize;_] [185; 186) 'g': [[isize;_];_] [189; 195) '[e, e]': [[isize;_];_] [190; 191) 'e': [isize;_] [193; 194) 'e': [isize;_] [206; 207) 'h': [i32;_] [210; 216) '[1, 2]': [i32;_] [211; 212) '1': i32 [214; 215) '2': i32 [226; 227) 'i': [&str;_] [230; 240) '["a", "b"]': [&str;_] [231; 234) '"a"': &str [236; 239) '"b"': &str [251; 252) 'b': [[&str;_];_] [255; 265) '[a, ["b"]]': [[&str;_];_] [256; 257) 'a': [&str;_] [259; 264) '["b"]': [&str;_] [260; 263) '"b"': &str [275; 276) 'x': [u8;_] [288; 290) '[]': [u8;_] "### ); } #[test] fn infer_struct_generics() { assert_snapshot!( infer(r#" struct A<T> { x: T, } fn test(a1: A<u32>, i: i32) { a1.x; let a2 = A { x: i }; a2.x; let a3 = A::<i128> { x: 1 }; a3.x; } "#), @r###" [36; 38) 'a1': A<u32> [48; 49) 'i': i32 [56; 147) '{ ...3.x; }': () [62; 64) 'a1': A<u32> [62; 66) 'a1.x': u32 [76; 78) 'a2': A<i32> [81; 91) 'A { x: i }': A<i32> [88; 89) 'i': i32 [97; 99) 'a2': A<i32> [97; 101) 'a2.x': i32 [111; 113) 'a3': A<i128> [116; 134) 'A::<i1...x: 1 }': A<i128> [131; 132) '1': i128 [140; 142) 'a3': A<i128> [140; 144) 'a3.x': i128 "### ); } #[test] fn infer_tuple_struct_generics() { assert_snapshot!( infer(r#" struct A<T>(T); enum Option<T> { Some(T), None } use Option::*; fn test() { A(42); A(42u128); Some("x"); Option::Some("x"); None; let x: Option<i64> = None; } "#), @r###" [76; 184) '{ ...one; }': () [82; 83) 'A': A<i32>(T) -> A<T> [82; 87) 'A(42)': A<i32> [84; 86) '42': i32 [93; 94) 'A': A<u128>(T) -> A<T> [93; 102) 'A(42u128)': A<u128> [95; 101) '42u128': u128 [108; 112) 'Some': Some<&str>(T) -> Option<T> [108; 117) 'Some("x")': Option<&str> [113; 116) '"x"': &str [123; 135) 'Option::Some': Some<&str>(T) -> Option<T> [123; 140) 'Option...e("x")': Option<&str> [136; 139) '"x"': &str [146; 150) 'None': Option<{unknown}> [160; 161) 'x': Option<i64> [177; 181) 'None': Option<i64> "### ); } #[test] fn infer_function_generics() { assert_snapshot!( infer(r#" fn id<T>(t: T) -> T { t } fn test() { id(1u32); id::<i128>(1); let x: u64 = id(1); } "#), @r###" [10; 11) 't': T [21; 26) '{ t }': T [23; 24) 't': T [38; 98) '{ ...(1); }': () [44; 46) 'id': fn id<u32>(T) -> T [44; 52) 'id(1u32)': u32 [47; 51) '1u32': u32 [58; 68) 'id::<i128>': fn id<i128>(T) -> T [58; 71) 'id::<i128>(1)': i128 [69; 70) '1': i128 [81; 82) 'x': u64 [90; 92) 'id': fn id<u64>(T) -> T [90; 95) 'id(1)': u64 [93; 94) '1': u64 "### ); } #[test] fn infer_impl_generics() { assert_snapshot!( infer(r#" struct A<T1, T2> { x: T1, y: T2, } impl<Y, X> A<X, Y> { fn x(self) -> X { self.x } fn y(self) -> Y { self.y } fn z<T>(self, t: T) -> (X, Y, T) { (self.x, self.y, t) } } fn test() -> i128 { let a = A { x: 1u64, y: 1i64 }; a.x(); a.y(); a.z(1i128); a.z::<u128>(1); } "#), @r###" [74; 78) 'self': A<X, Y> [85; 107) '{ ... 
}': X
    [95; 99) 'self': A<X, Y>
    [95; 101) 'self.x': X
    [117; 121) 'self': A<X, Y>
    [128; 150) '{ ... }': Y
    [138; 142) 'self': A<X, Y>
    [138; 144) 'self.y': Y
    [163; 167) 'self': A<X, Y>
    [169; 170) 't': T
    [188; 223) '{ ... }': (X, Y, T)
    [198; 217) '(self.....y, t)': (X, Y, T)
    [199; 203) 'self': A<X, Y>
    [199; 205) 'self.x': X
    [207; 211) 'self': A<X, Y>
    [207; 213) 'self.y': Y
    [215; 216) 't': T
    [245; 342) '{ ...(1); }': ()
    [255; 256) 'a': A<u64, i64>
    [259; 281) 'A { x:...1i64 }': A<u64, i64>
    [266; 270) '1u64': u64
    [275; 279) '1i64': i64
    [287; 288) 'a': A<u64, i64>
    [287; 292) 'a.x()': u64
    [298; 299) 'a': A<u64, i64>
    [298; 303) 'a.y()': i64
    [309; 310) 'a': A<u64, i64>
    [309; 319) 'a.z(1i128)': (u64, i64, i128)
    [313; 318) '1i128': i128
    [325; 326) 'a': A<u64, i64>
    [325; 339) 'a.z::<u128>(1)': (u64, i64, u128)
    [337; 338) '1': u128
    "###
    );
}

#[test]
fn infer_impl_generics_with_autoderef() {
    assert_snapshot!(
        infer(r#"
enum Option<T> {
    Some(T),
    None,
}
impl<T> Option<T> {
    fn as_ref(&self) -> Option<&T> {}
}
fn test(o: Option<u32>) {
    (&o).as_ref();
    o.as_ref();
}
"#),
        @r###"
    [78; 82) 'self': &Option<T>
    [98; 100) '{}': ()
    [111; 112) 'o': Option<u32>
    [127; 165) '{ ...f(); }': ()
    [133; 146) '(&o).as_ref()': Option<&u32>
    [134; 136) '&o': &Option<u32>
    [135; 136) 'o': Option<u32>
    [152; 153) 'o': Option<u32>
    [152; 162) 'o.as_ref()': Option<&u32>
    "###
    );
}

#[test]
fn infer_generic_chain() {
    assert_snapshot!(
        infer(r#"
struct A<T> {
    x: T,
}
impl<T2> A<T2> {
    fn x(self) -> T2 {
        self.x
    }
}
fn id<T>(t: T) -> T { t }

fn test() -> i128 {
    let x = 1;
    let y = id(x);
    let a = A { x: id(y) };
    let z = id(a.x);
    let b = A { x: z };
    b.x()
}
"#),
        @r###"
    [53; 57) 'self': A<T2>
    [65; 87) '{ ... }': T2
    [75; 79) 'self': A<T2>
    [75; 81) 'self.x': T2
    [99; 100) 't': T
    [110; 115) '{ t }': T
    [112; 113) 't': T
    [135; 261) '{ ....x() }': i128
    [146; 147) 'x': i128
    [150; 151) '1': i128
    [162; 163) 'y': i128
    [166; 168) 'id': fn id<i128>(T) -> T
    [166; 171) 'id(x)': i128
    [169; 170) 'x': i128
    [182; 183) 'a': A<i128>
    [186; 200) 'A { x: id(y) }': A<i128>
    [193; 195) 'id': fn id<i128>(T) -> T
    [193; 198) 'id(y)': i128
    [196; 197) 'y': i128
    [211; 212) 'z': i128
    [215; 217) 'id': fn id<i128>(T) -> T
    [215; 222) 'id(a.x)': i128
    [218; 219) 'a': A<i128>
    [218; 221) 'a.x': i128
    [233; 234) 'b': A<i128>
    [237; 247) 'A { x: z }': A<i128>
    [244; 245) 'z': i128
    [254; 255) 'b': A<i128>
    [254; 259) 'b.x()': i128
    "###
    );
}

#[test]
fn infer_associated_const() {
    assert_snapshot!(
        infer(r#"
struct Struct;

impl Struct {
    const FOO: u32 = 1;
}

enum Enum {}

impl Enum {
    const BAR: u32 = 2;
}

trait Trait {
    const ID: u32;
}

struct TraitTest;

impl Trait for TraitTest {
    const ID: u32 = 5;
}

fn test() {
    let x = Struct::FOO;
    let y = Enum::BAR;
    let z = TraitTest::ID;
}
"#),
        @r###"
    [52; 53) '1': u32
    [105; 106) '2': u32
    [213; 214) '5': u32
    [229; 307) '{ ...:ID; }': ()
    [239; 240) 'x': u32
    [243; 254) 'Struct::FOO': u32
    [264; 265) 'y': u32
    [268; 277) 'Enum::BAR': u32
    [287; 288) 'z': u32
    [291; 304) 'TraitTest::ID': u32
    "###
    );
}

#[test]
fn infer_type_alias() {
    assert_snapshot!(
        infer(r#"
struct A<X, Y> { x: X, y: Y }
type Foo = A<u32, i128>;
type Bar<T> = A<T, u128>;
type Baz<U, V> = A<V, U>;
fn test(x: Foo, y: Bar<&str>, z: Baz<i8, u8>) {
    x.x;
    x.y;
    y.x;
    y.y;
    z.x;
    z.y;
}
"#),
        @r###"
    [116; 117) 'x': A<u32, i128>
    [124; 125) 'y': A<&str, u128>
    [138; 139) 'z': A<u8, i8>
    [154; 211) '{ ...z.y; }': ()
    [160; 161) 'x': A<u32, i128>
    [160; 163) 'x.x': u32
    [169; 170) 'x': A<u32, i128>
    [169; 172) 'x.y': i128
    [178; 179) 'y': A<&str, u128>
    [178; 181) 'y.x': &str
    [187; 188) 'y': A<&str, u128>
    [187; 190) 'y.y': u128
    [196; 197) 'z': A<u8, i8>
    [196; 199) 'z.x': u8
    [205; 206) 'z': A<u8, i8>
    [205; 208) 'z.y': i8
    "###
    )
}

#[test]
fn recursive_type_alias() {
    assert_snapshot!(
        infer(r#"
struct A<X> {}
type Foo = Foo;
type Bar = A<Bar>;
fn test(x: Foo) {}
"#),
        @r###"
    [59; 60) 'x': {unknown}
    [67; 69) '{}': ()
    "###
    )
}

#[test]
fn infer_type_param() {
    assert_snapshot!(
        infer(r#"
fn id<T>(x: T) -> T {
    x
}

fn clone<T>(x: &T) -> T {
    *x
}

fn test() {
    let y = 10u32;
    id(y);
    let x: bool = clone(z);
    id::<i128>(1);
}
"#),
        @r###"
    [10; 11) 'x': T
    [21; 30) '{ x }': T
    [27; 28) 'x': T
    [44; 45) 'x': &T
    [56; 66) '{ *x }': T
    [62; 64) '*x': T
    [63; 64) 'x': &T
    [78; 158) '{ ...(1); }': ()
    [88; 89) 'y': u32
    [92; 97) '10u32': u32
    [103; 105) 'id': fn id<u32>(T) -> T
    [103; 108) 'id(y)': u32
    [106; 107) 'y': u32
    [118; 119) 'x': bool
    [128; 133) 'clone': fn clone<bool>(&T) -> T
    [128; 136) 'clone(z)': bool
    [134; 135) 'z': &bool
    [142; 152) 'id::<i128>': fn id<i128>(T) -> T
    [142; 155) 'id::<i128>(1)': i128
    [153; 154) '1': i128
    "###
    );
}

#[test]
fn infer_const() {
    assert_snapshot!(
        infer(r#"
struct Foo;
impl Foo { const ASSOC_CONST: u32 = 0; }
const GLOBAL_CONST: u32 = 101;
fn test() {
    const LOCAL_CONST: u32 = 99;
    let x = LOCAL_CONST;
    let z = GLOBAL_CONST;
    let id = Foo::ASSOC_CONST;
}
"#),
        @r###"
    [49; 50) '0': u32
    [80; 83) '101': u32
    [95; 213) '{ ...NST; }': ()
    [138; 139) 'x': {unknown}
    [142; 153) 'LOCAL_CONST': {unknown}
    [163; 164) 'z': u32
    [167; 179) 'GLOBAL_CONST': u32
    [189; 191) 'id': u32
    [194; 210) 'Foo::A..._CONST': u32
    "###
    );
}

#[test]
fn infer_static() {
    assert_snapshot!(
        infer(r#"
static GLOBAL_STATIC: u32 = 101;
static mut GLOBAL_STATIC_MUT: u32 = 101;
fn test() {
    static LOCAL_STATIC: u32 = 99;
    static mut LOCAL_STATIC_MUT: u32 = 99;
    let x = LOCAL_STATIC;
    let y = LOCAL_STATIC_MUT;
    let z = GLOBAL_STATIC;
    let w = GLOBAL_STATIC_MUT;
}
"#),
        @r###"
    [29; 32) '101': u32
    [70; 73) '101': u32
    [85; 280) '{ ...MUT; }': ()
    [173; 174) 'x': {unknown}
    [177; 189) 'LOCAL_STATIC': {unknown}
    [199; 200) 'y': {unknown}
    [203; 219) 'LOCAL_...IC_MUT': {unknown}
    [229; 230) 'z': u32
    [233; 246) 'GLOBAL_STATIC': u32
    [256; 257) 'w': u32
    [260; 277) 'GLOBAL...IC_MUT': u32
    "###
    );
}

#[test]
fn shadowing_primitive() {
    let t = type_at(
        r#"
//- /main.rs
struct i32;
struct Foo;

impl i32 { fn foo(&self) -> Foo { Foo } }

fn main() {
    let x: i32 = i32;
    x.foo()<|>;
}"#,
    );
    assert_eq!(t, "Foo");
}

#[test]
fn not_shadowing_primitive_by_module() {
    let t = type_at(
        r#"
//- /str.rs
fn foo() {}

//- /main.rs
mod str;
fn foo() -> &'static str { "" }

fn main() {
    foo()<|>;
}"#,
    );
    assert_eq!(t, "&str");
}

#[test]
fn not_shadowing_module_by_primitive() {
    let t = type_at(
        r#"
//- /str.rs
fn foo() -> u32 {0}

//- /main.rs
mod str;
fn foo() -> &'static str { "" }

fn main() {
    str::foo()<|>;
}"#,
    );
    assert_eq!(t, "u32");
}
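// Illustrative sketch, not part of the original suite: `type_at` reports the
// inferred type of the expression that ends at the `<|>` cursor marker, as the
// three tests above demonstrate. A minimal probe in the same style would look
// like this; the expected type follows from ordinary integer inference.
#[test]
fn sketch_type_at_cursor_marker() {
    let t = type_at(
        r#"
//- /main.rs
fn main() {
    let x = 1u32 + 1;
    x<|>;
}"#,
    );
    assert_eq!(t, "u32");
}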
20.059043
111
0.412084
5bf8da8a362342fc03d41f1f1881006948122eed
26,865
// Copyright 2019. The Tari Project
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
// following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
// disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
// following disclaimer in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

use std::{fmt, fmt::Formatter, sync::Arc};

use aes_gcm::Aes256Gcm;
use tari_common_types::{
    transaction::TxId,
    types::{HashOutput, PublicKey},
};
use tari_core::transactions::{
    tari_amount::MicroTari,
    transaction::{OutputFeatures, Transaction, TransactionOutput, UnblindedOutput, UnblindedOutputBuilder},
    transaction_protocol::sender::TransactionSenderMessage,
    ReceiverTransactionProtocol,
    SenderTransactionProtocol,
};
use tari_crypto::{script::TariScript, tari_utilities::hex::Hex};
use tari_service_framework::reply_channel::SenderService;
use tokio::sync::broadcast;
use tower::Service;

use crate::output_manager_service::{
    error::OutputManagerError,
    service::Balance,
    storage::models::{KnownOneSidedPaymentScript, SpendingPriority},
};

/// API Request enum
#[allow(clippy::large_enum_variant)]
pub enum OutputManagerRequest {
    GetBalance,
    AddOutput((Box<UnblindedOutput>, Option<SpendingPriority>)),
    AddOutputWithTxId((TxId, Box<UnblindedOutput>, Option<SpendingPriority>)),
    AddUnvalidatedOutput((TxId, Box<UnblindedOutput>, Option<SpendingPriority>)),
    UpdateOutputMetadataSignature(Box<TransactionOutput>),
    GetRecipientTransaction(TransactionSenderMessage),
    GetCoinbaseTransaction((TxId, MicroTari, MicroTari, u64)),
    ConfirmPendingTransaction(TxId),
    PrepareToSendTransaction {
        tx_id: TxId,
        amount: MicroTari,
        unique_id: Option<Vec<u8>>,
        parent_public_key: Option<PublicKey>,
        fee_per_gram: MicroTari,
        lock_height: Option<u64>,
        message: String,
        script: TariScript,
    },
    CreatePayToSelfTransaction {
        tx_id: TxId,
        amount: MicroTari,
        unique_id: Option<Vec<u8>>,
        parent_public_key: Option<PublicKey>,
        fee_per_gram: MicroTari,
        lock_height: Option<u64>,
        message: String,
    },
    CreatePayToSelfWithOutputs {
        outputs: Vec<UnblindedOutputBuilder>,
        fee_per_gram: MicroTari,
        spending_unique_id: Option<Vec<u8>>,
        spending_parent_public_key: Option<PublicKey>,
    },
    CancelTransaction(TxId),
    GetSpentOutputs,
    GetUnspentOutputs,
    GetInvalidOutputs,
    GetSeedWords,
    ValidateUtxos,
    RevalidateTxos,
    CreateCoinSplit((MicroTari, usize, MicroTari, Option<u64>)),
    ApplyEncryption(Box<Aes256Gcm>),
    RemoveEncryption,
    GetPublicRewindKeys,
    FeeEstimate {
        amount: MicroTari,
        fee_per_gram: MicroTari,
        num_kernels: usize,
        num_outputs: usize,
    },
    ScanForRecoverableOutputs(Vec<TransactionOutput>),
    ScanOutputs(Vec<TransactionOutput>),
    AddKnownOneSidedPaymentScript(KnownOneSidedPaymentScript),
    CreateOutputWithFeatures {
        value: MicroTari,
        features: Box<OutputFeatures>,
    },
    ReinstateCancelledInboundTx(TxId),
    SetCoinbaseAbandoned(TxId, bool),
    CreateClaimShaAtomicSwapTransaction(HashOutput, PublicKey, MicroTari),
    CreateHtlcRefundTransaction(HashOutput, MicroTari),
}

impl fmt::Display for OutputManagerRequest {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        use OutputManagerRequest::*;
        match self {
            GetBalance => write!(f, "GetBalance"),
            AddOutput((v, _)) => write!(f, "AddOutput ({})", v.value),
            AddOutputWithTxId((t, v, _)) => write!(f, "AddOutputWithTxId ({}: {})", t, v.value),
            AddUnvalidatedOutput((t, v, _)) => {
                write!(f, "AddUnvalidatedOutput ({}: {})", t, v.value)
            },
            UpdateOutputMetadataSignature(v) => write!(
                f,
                "UpdateOutputMetadataSignature ({}, {}, {})",
                v.metadata_signature.public_nonce().to_hex(),
                v.metadata_signature.u().to_hex(),
                v.metadata_signature.v().to_hex()
            ),
            GetRecipientTransaction(_) => write!(f, "GetRecipientTransaction"),
            ConfirmPendingTransaction(v) => write!(f, "ConfirmPendingTransaction ({})", v),
            PrepareToSendTransaction { message, .. } => write!(f, "PrepareToSendTransaction ({})", message),
            CreatePayToSelfTransaction { message, .. } => write!(f, "CreatePayToSelfTransaction ({})", message),
            CancelTransaction(v) => write!(f, "CancelTransaction ({})", v),
            GetSpentOutputs => write!(f, "GetSpentOutputs"),
            GetUnspentOutputs => write!(f, "GetUnspentOutputs"),
            GetInvalidOutputs => write!(f, "GetInvalidOutputs"),
            GetSeedWords => write!(f, "GetSeedWords"),
            ValidateUtxos => write!(f, "ValidateUtxos"),
            RevalidateTxos => write!(f, "RevalidateTxos"),
            CreateCoinSplit(v) => write!(f, "CreateCoinSplit ({})", v.0),
            ApplyEncryption(_) => write!(f, "ApplyEncryption"),
            RemoveEncryption => write!(f, "RemoveEncryption"),
            GetCoinbaseTransaction(_) => write!(f, "GetCoinbaseTransaction"),
            GetPublicRewindKeys => write!(f, "GetPublicRewindKeys"),
            FeeEstimate {
                amount,
                fee_per_gram,
                num_kernels,
                num_outputs,
            } => write!(
                f,
                "FeeEstimate(amount: {}, fee_per_gram: {}, num_kernels: {}, num_outputs: {})",
                amount, fee_per_gram, num_kernels, num_outputs
            ),
            ScanForRecoverableOutputs(_) => write!(f, "ScanForRecoverableOutputs"),
            ScanOutputs(_) => write!(f, "ScanOutputs"),
            AddKnownOneSidedPaymentScript(_) => write!(f, "AddKnownOneSidedPaymentScript"),
            CreateOutputWithFeatures { value, features } => {
                write!(f, "CreateOutputWithFeatures({}, {})", value, features)
            },
            CreatePayToSelfWithOutputs { .. } => write!(f, "CreatePayToSelfWithOutputs"),
            ReinstateCancelledInboundTx(_) => write!(f, "ReinstateCancelledInboundTx"),
            SetCoinbaseAbandoned(_, _) => write!(f, "SetCoinbaseAbandoned"),
            CreateClaimShaAtomicSwapTransaction(output, pre_image, fee_per_gram) => write!(
                f,
                "ClaimShaAtomicSwap(output hash: {}, pre_image: {}, fee_per_gram: {})",
                output.to_hex(),
                pre_image,
                fee_per_gram,
            ),
            CreateHtlcRefundTransaction(output, fee_per_gram) => write!(
                f,
                "CreateHtlcRefundTransaction(output hash: {}, fee_per_gram: {})",
                output.to_hex(),
                fee_per_gram,
            ),
        }
    }
}

/// API Reply enum
#[derive(Debug, Clone)]
pub enum OutputManagerResponse {
    Balance(Balance),
    OutputAdded,
    OutputMetadataSignatureUpdated,
    RecipientTransactionGenerated(ReceiverTransactionProtocol),
    CoinbaseTransaction(Transaction),
    OutputConfirmed,
    PendingTransactionConfirmed,
    PayToSelfTransaction((MicroTari, Transaction)),
    TransactionToSend(SenderTransactionProtocol),
    TransactionCancelled,
    SpentOutputs(Vec<UnblindedOutput>),
    UnspentOutputs(Vec<UnblindedOutput>),
    InvalidOutputs(Vec<UnblindedOutput>),
    SeedWords(Vec<String>),
    BaseNodePublicKeySet,
    TxoValidationStarted(u64),
    Transaction((TxId, Transaction, MicroTari)),
    EncryptionApplied,
    EncryptionRemoved,
    PublicRewindKeys(Box<PublicRewindKeys>),
    FeeEstimate(MicroTari),
    RewoundOutputs(Vec<UnblindedOutput>),
    ScanOutputs(Vec<UnblindedOutput>),
    AddKnownOneSidedPaymentScript,
    CreateOutputWithFeatures { output: Box<UnblindedOutputBuilder> },
    CreatePayToSelfWithOutputs { transaction: Box<Transaction>, tx_id: TxId },
    ReinstatedCancelledInboundTx,
    CoinbaseAbandonedSet,
    ClaimHtlcTransaction((TxId, MicroTari, MicroTari, Transaction)),
}

pub type OutputManagerEventSender = broadcast::Sender<Arc<OutputManagerEvent>>;
pub type OutputManagerEventReceiver = broadcast::Receiver<Arc<OutputManagerEvent>>;

/// Events that can be published on the Output Manager Service Event Stream
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum OutputManagerEvent {
    TxoValidationTimedOut(u64),
    TxoValidationSuccess(u64),
    TxoValidationFailure(u64),
    TxoValidationAborted(u64),
    TxoValidationDelayed(u64),
    Error(String),
}

impl fmt::Display for OutputManagerEvent {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        match self {
            OutputManagerEvent::TxoValidationTimedOut(tx) => {
                write!(f, "TxoValidationTimedOut for {}", tx)
            },
            OutputManagerEvent::TxoValidationSuccess(tx) => {
                write!(f, "TxoValidationSuccess for {}", tx)
            },
            OutputManagerEvent::TxoValidationFailure(tx) => {
                write!(f, "TxoValidationFailure for {}", tx)
            },
            OutputManagerEvent::TxoValidationAborted(tx) => {
                write!(f, "TxoValidationAborted for {}", tx)
            },
            OutputManagerEvent::TxoValidationDelayed(tx) => {
                write!(f, "TxoValidationDelayed for {}", tx)
            },
            OutputManagerEvent::Error(error) => {
                write!(f, "Error {}", error)
            },
        }
    }
}

#[derive(Debug, Clone)]
pub struct PublicRewindKeys {
    pub rewind_public_key: PublicKey,
    pub rewind_blinding_public_key: PublicKey,
}

#[derive(Clone)]
pub struct OutputManagerHandle {
    handle: SenderService<OutputManagerRequest, Result<OutputManagerResponse, OutputManagerError>>,
    event_stream_sender: OutputManagerEventSender,
}

impl OutputManagerHandle {
    pub fn new(
        handle: SenderService<OutputManagerRequest, Result<OutputManagerResponse, OutputManagerError>>,
        event_stream_sender: OutputManagerEventSender,
    ) -> Self {
        OutputManagerHandle {
            handle,
            event_stream_sender,
        }
    }

    pub fn get_event_stream(&self) -> OutputManagerEventReceiver {
        self.event_stream_sender.subscribe()
    }
    /// Adds an unblinded output to the service, optionally tagged with a spending priority.
    pub async fn add_output(
        &mut self,
        output: UnblindedOutput,
        spend_priority: Option<SpendingPriority>,
    ) -> Result<(), OutputManagerError> {
        match self
            .handle
            .call(OutputManagerRequest::AddOutput((Box::new(output), spend_priority)))
            .await??
        {
            OutputManagerResponse::OutputAdded => Ok(()),
            _ => Err(OutputManagerError::UnexpectedApiResponse),
        }
    }

    pub async fn add_output_with_tx_id(
        &mut self,
        tx_id: TxId,
        output: UnblindedOutput,
        spend_priority: Option<SpendingPriority>,
    ) -> Result<(), OutputManagerError> {
        match self
            .handle
            .call(OutputManagerRequest::AddOutputWithTxId((
                tx_id,
                Box::new(output),
                spend_priority,
            )))
            .await??
        {
            OutputManagerResponse::OutputAdded => Ok(()),
            _ => Err(OutputManagerError::UnexpectedApiResponse),
        }
    }

    pub async fn add_unvalidated_output(
        &mut self,
        tx_id: TxId,
        output: UnblindedOutput,
        spend_priority: Option<SpendingPriority>,
    ) -> Result<(), OutputManagerError> {
        match self
            .handle
            .call(OutputManagerRequest::AddUnvalidatedOutput((
                tx_id,
                Box::new(output),
                spend_priority,
            )))
            .await??
        {
            OutputManagerResponse::OutputAdded => Ok(()),
            _ => Err(OutputManagerError::UnexpectedApiResponse),
        }
    }

    pub async fn create_output_with_features(
        &mut self,
        value: MicroTari,
        features: OutputFeatures,
    ) -> Result<UnblindedOutputBuilder, OutputManagerError> {
        match self
            .handle
            .call(OutputManagerRequest::CreateOutputWithFeatures {
                value,
                features: Box::new(features),
            })
            .await??
        {
            OutputManagerResponse::CreateOutputWithFeatures { output } => Ok(*output),
            _ => Err(OutputManagerError::UnexpectedApiResponse),
        }
    }

    pub async fn update_output_metadata_signature(
        &mut self,
        output: TransactionOutput,
    ) -> Result<(), OutputManagerError> {
        match self
            .handle
            .call(OutputManagerRequest::UpdateOutputMetadataSignature(Box::new(output)))
            .await??
        {
            OutputManagerResponse::OutputMetadataSignatureUpdated => Ok(()),
            _ => Err(OutputManagerError::UnexpectedApiResponse),
        }
    }

    /// Returns the current wallet balance.
    pub async fn get_balance(&mut self) -> Result<Balance, OutputManagerError> {
        match self.handle.call(OutputManagerRequest::GetBalance).await?? {
            OutputManagerResponse::Balance(b) => Ok(b),
            _ => Err(OutputManagerError::UnexpectedApiResponse),
        }
    }

    pub async fn revalidate_all_outputs(&mut self) -> Result<u64, OutputManagerError> {
        match self.handle.call(OutputManagerRequest::RevalidateTxos).await?? {
            OutputManagerResponse::TxoValidationStarted(request_key) => Ok(request_key),
            _ => Err(OutputManagerError::UnexpectedApiResponse),
        }
    }

    pub async fn get_recipient_transaction(
        &mut self,
        sender_message: TransactionSenderMessage,
    ) -> Result<ReceiverTransactionProtocol, OutputManagerError> {
        match self
            .handle
            .call(OutputManagerRequest::GetRecipientTransaction(sender_message))
            .await??
        {
            OutputManagerResponse::RecipientTransactionGenerated(rtp) => Ok(rtp),
            _ => Err(OutputManagerError::UnexpectedApiResponse),
        }
    }

    pub async fn get_coinbase_transaction(
        &mut self,
        tx_id: TxId,
        reward: MicroTari,
        fees: MicroTari,
        block_height: u64,
    ) -> Result<Transaction, OutputManagerError> {
        match self
            .handle
            .call(OutputManagerRequest::GetCoinbaseTransaction((
                tx_id,
                reward,
                fees,
                block_height,
            )))
            .await??
        {
            OutputManagerResponse::CoinbaseTransaction(tx) => Ok(tx),
            _ => Err(OutputManagerError::UnexpectedApiResponse),
        }
    }

    pub async fn prepare_transaction_to_send(
        &mut self,
        tx_id: TxId,
        amount: MicroTari,
        unique_id: Option<Vec<u8>>,
        parent_public_key: Option<PublicKey>,
        fee_per_gram: MicroTari,
        lock_height: Option<u64>,
        message: String,
        script: TariScript,
    ) -> Result<SenderTransactionProtocol, OutputManagerError> {
        match self
            .handle
            .call(OutputManagerRequest::PrepareToSendTransaction {
                tx_id,
                amount,
                unique_id,
                parent_public_key,
                fee_per_gram,
                lock_height,
                message,
                script,
            })
            .await??
        {
            OutputManagerResponse::TransactionToSend(stp) => Ok(stp),
            _ => Err(OutputManagerError::UnexpectedApiResponse),
        }
    }

    /// Get a fee estimate for an amount of MicroTari, at a specified fee per gram and given number of kernels and
    /// outputs.
    pub async fn fee_estimate(
        &mut self,
        amount: MicroTari,
        fee_per_gram: MicroTari,
        num_kernels: usize,
        num_outputs: usize,
    ) -> Result<MicroTari, OutputManagerError> {
        match self
            .handle
            .call(OutputManagerRequest::FeeEstimate {
                amount,
                fee_per_gram,
                num_kernels,
                num_outputs,
            })
            .await??
        {
            OutputManagerResponse::FeeEstimate(fee) => Ok(fee),
            _ => Err(OutputManagerError::UnexpectedApiResponse),
        }
    }

    pub async fn confirm_pending_transaction(&mut self, tx_id: TxId) -> Result<(), OutputManagerError> {
        match self
            .handle
            .call(OutputManagerRequest::ConfirmPendingTransaction(tx_id))
            .await??
        {
            OutputManagerResponse::PendingTransactionConfirmed => Ok(()),
            _ => Err(OutputManagerError::UnexpectedApiResponse),
        }
    }

    pub async fn cancel_transaction(&mut self, tx_id: TxId) -> Result<(), OutputManagerError> {
        match self
            .handle
            .call(OutputManagerRequest::CancelTransaction(tx_id))
            .await??
        {
            OutputManagerResponse::TransactionCancelled => Ok(()),
            _ => Err(OutputManagerError::UnexpectedApiResponse),
        }
    }

    pub async fn get_spent_outputs(&mut self) -> Result<Vec<UnblindedOutput>, OutputManagerError> {
        match self.handle.call(OutputManagerRequest::GetSpentOutputs).await?? {
            OutputManagerResponse::SpentOutputs(s) => Ok(s),
            _ => Err(OutputManagerError::UnexpectedApiResponse),
        }
    }

    /// Sorted from lowest value to highest
    pub async fn get_unspent_outputs(&mut self) -> Result<Vec<UnblindedOutput>, OutputManagerError> {
        match self.handle.call(OutputManagerRequest::GetUnspentOutputs).await?? {
            OutputManagerResponse::UnspentOutputs(s) => Ok(s),
            _ => Err(OutputManagerError::UnexpectedApiResponse),
        }
    }

    pub async fn get_invalid_outputs(&mut self) -> Result<Vec<UnblindedOutput>, OutputManagerError> {
        match self.handle.call(OutputManagerRequest::GetInvalidOutputs).await?? {
            OutputManagerResponse::InvalidOutputs(s) => Ok(s),
            _ => Err(OutputManagerError::UnexpectedApiResponse),
        }
    }

    pub async fn get_seed_words(&mut self) -> Result<Vec<String>, OutputManagerError> {
        match self.handle.call(OutputManagerRequest::GetSeedWords).await?? {
            OutputManagerResponse::SeedWords(s) => Ok(s),
            _ => Err(OutputManagerError::UnexpectedApiResponse),
        }
    }

    pub async fn get_rewind_public_keys(&mut self) -> Result<PublicRewindKeys, OutputManagerError> {
        match self.handle.call(OutputManagerRequest::GetPublicRewindKeys).await?? {
            OutputManagerResponse::PublicRewindKeys(rk) => Ok(*rk),
            _ => Err(OutputManagerError::UnexpectedApiResponse),
        }
    }
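    // Illustrative note, not in the original source: `validate_txos` (below)
    // and `revalidate_all_outputs` (above) return a `u64` request key rather
    // than a final result. The service later publishes
    // `OutputManagerEvent::TxoValidation*` events carrying the same key on the
    // event stream, which is how a caller pairs an outcome with the request it
    // made.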
    /// Starts a TXO validation round; returns the request key used to correlate the resulting events.
    pub async fn validate_txos(&mut self) -> Result<u64, OutputManagerError> {
        match self.handle.call(OutputManagerRequest::ValidateUtxos).await?? {
            OutputManagerResponse::TxoValidationStarted(request_key) => Ok(request_key),
            _ => Err(OutputManagerError::UnexpectedApiResponse),
        }
    }

    /// Create a coin split transaction.
    /// Returns (tx_id, tx, utxos_total_value).
    pub async fn create_coin_split(
        &mut self,
        amount_per_split: MicroTari,
        split_count: usize,
        fee_per_gram: MicroTari,
        lock_height: Option<u64>,
    ) -> Result<(TxId, Transaction, MicroTari), OutputManagerError> {
        match self
            .handle
            .call(OutputManagerRequest::CreateCoinSplit((
                amount_per_split,
                split_count,
                fee_per_gram,
                lock_height,
            )))
            .await??
        {
            OutputManagerResponse::Transaction(ct) => Ok(ct),
            _ => Err(OutputManagerError::UnexpectedApiResponse),
        }
    }

    pub async fn create_htlc_refund_transaction(
        &mut self,
        output: HashOutput,
        fee_per_gram: MicroTari,
    ) -> Result<(TxId, MicroTari, MicroTari, Transaction), OutputManagerError> {
        match self
            .handle
            .call(OutputManagerRequest::CreateHtlcRefundTransaction(output, fee_per_gram))
            .await??
        {
            OutputManagerResponse::ClaimHtlcTransaction(ct) => Ok(ct),
            _ => Err(OutputManagerError::UnexpectedApiResponse),
        }
    }

    pub async fn create_claim_sha_atomic_swap_transaction(
        &mut self,
        output: HashOutput,
        pre_image: PublicKey,
        fee_per_gram: MicroTari,
    ) -> Result<(TxId, MicroTari, MicroTari, Transaction), OutputManagerError> {
        match self
            .handle
            .call(OutputManagerRequest::CreateClaimShaAtomicSwapTransaction(
                output, pre_image, fee_per_gram,
            ))
            .await??
        {
            OutputManagerResponse::ClaimHtlcTransaction(ct) => Ok(ct),
            _ => Err(OutputManagerError::UnexpectedApiResponse),
        }
    }

    /// Applies encryption to the output manager's backend using the given cipher.
    pub async fn apply_encryption(&mut self, cipher: Aes256Gcm) -> Result<(), OutputManagerError> {
        match self
            .handle
            .call(OutputManagerRequest::ApplyEncryption(Box::new(cipher)))
            .await??
        {
            OutputManagerResponse::EncryptionApplied => Ok(()),
            _ => Err(OutputManagerError::UnexpectedApiResponse),
        }
    }

    pub async fn remove_encryption(&mut self) -> Result<(), OutputManagerError> {
        match self.handle.call(OutputManagerRequest::RemoveEncryption).await?? {
            OutputManagerResponse::EncryptionRemoved => Ok(()),
            _ => Err(OutputManagerError::UnexpectedApiResponse),
        }
    }

    pub async fn scan_for_recoverable_outputs(
        &mut self,
        outputs: Vec<TransactionOutput>,
    ) -> Result<Vec<UnblindedOutput>, OutputManagerError> {
        match self
            .handle
            .call(OutputManagerRequest::ScanForRecoverableOutputs(outputs))
            .await??
        {
            OutputManagerResponse::RewoundOutputs(outputs) => Ok(outputs),
            _ => Err(OutputManagerError::UnexpectedApiResponse),
        }
    }

    pub async fn scan_outputs_for_one_sided_payments(
        &mut self,
        outputs: Vec<TransactionOutput>,
    ) -> Result<Vec<UnblindedOutput>, OutputManagerError> {
        match self.handle.call(OutputManagerRequest::ScanOutputs(outputs)).await?? {
            OutputManagerResponse::ScanOutputs(outputs) => Ok(outputs),
            _ => Err(OutputManagerError::UnexpectedApiResponse),
        }
    }

    pub async fn add_known_script(&mut self, script: KnownOneSidedPaymentScript) -> Result<(), OutputManagerError> {
        match self
            .handle
            .call(OutputManagerRequest::AddKnownOneSidedPaymentScript(script))
            .await??
        {
            OutputManagerResponse::AddKnownOneSidedPaymentScript => Ok(()),
            _ => Err(OutputManagerError::UnexpectedApiResponse),
        }
    }

    pub async fn create_send_to_self_with_output(
        &mut self,
        outputs: Vec<UnblindedOutputBuilder>,
        fee_per_gram: MicroTari,
        spending_unique_id: Option<Vec<u8>>,
        spending_parent_public_key: Option<PublicKey>,
    ) -> Result<(TxId, Transaction), OutputManagerError> {
        match self
            .handle
            .call(OutputManagerRequest::CreatePayToSelfWithOutputs {
                outputs,
                fee_per_gram,
                spending_unique_id,
                spending_parent_public_key,
            })
            .await??
        {
            OutputManagerResponse::CreatePayToSelfWithOutputs { transaction, tx_id } => Ok((tx_id, *transaction)),
            _ => Err(OutputManagerError::UnexpectedApiResponse),
        }
    }

    pub async fn create_pay_to_self_transaction(
        &mut self,
        tx_id: TxId,
        amount: MicroTari,
        unique_id: Option<Vec<u8>>,
        parent_public_key: Option<PublicKey>,
        fee_per_gram: MicroTari,
        lock_height: Option<u64>,
        message: String,
    ) -> Result<(MicroTari, Transaction), OutputManagerError> {
        match self
            .handle
            .call(OutputManagerRequest::CreatePayToSelfTransaction {
                tx_id,
                amount,
                fee_per_gram,
                lock_height,
                message,
                unique_id,
                parent_public_key,
            })
            .await??
        {
            OutputManagerResponse::PayToSelfTransaction(outputs) => Ok(outputs),
            _ => Err(OutputManagerError::UnexpectedApiResponse),
        }
    }

    pub async fn reinstate_cancelled_inbound_transaction_outputs(
        &mut self,
        tx_id: TxId,
    ) -> Result<(), OutputManagerError> {
        match self
            .handle
            .call(OutputManagerRequest::ReinstateCancelledInboundTx(tx_id))
            .await??
        {
            OutputManagerResponse::ReinstatedCancelledInboundTx => Ok(()),
            _ => Err(OutputManagerError::UnexpectedApiResponse),
        }
    }

    pub async fn set_coinbase_abandoned(&mut self, tx_id: TxId, abandoned: bool) -> Result<(), OutputManagerError> {
        match self
            .handle
            .call(OutputManagerRequest::SetCoinbaseAbandoned(tx_id, abandoned))
            .await??
        {
            OutputManagerResponse::CoinbaseAbandonedSet => Ok(()),
            _ => Err(OutputManagerError::UnexpectedApiResponse),
        }
    }
}
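// Illustrative usage sketch, not part of the original file: a typical call
// sequence against the handle API above. It does nothing useful without a
// running OutputManagerService behind `oms`; the amounts are arbitrary
// placeholders and `Balance::available_balance` is assumed to be the
// spendable field of the `Balance` struct.
#[allow(dead_code)]
async fn example_usage(mut oms: OutputManagerHandle) -> Result<(), OutputManagerError> {
    // Check what is spendable before building a transaction.
    let balance = oms.get_balance().await?;
    println!("available: {}", balance.available_balance);

    // Estimate the fee for a typical 1-kernel, 2-output transaction.
    let fee = oms
        .fee_estimate(MicroTari::from(10_000), MicroTari::from(5), 1, 2)
        .await?;
    println!("estimated fee: {}", fee);

    Ok(())
}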
36.953232
118
0.618798